author     Arthur Heymans <arthur@aheymans.xyz>    2022-11-01 23:45:59 +0100
committer  Arthur Heymans <arthur@aheymans.xyz>    2022-11-07 13:59:35 +0000
commit     66b2888b77da6721955a918c8cd5399abe786a6a (patch)
tree       1dded45a911f18ffd86a80b56add28526209f66f /src/cpu/x86/smm
parent     e2d291b5ae4aa49d5b1613e06b86bf2fc8efe4c5 (diff)
cpu/x86: Drop LEGACY_SMP_INIT

This codepath is deprecated after the 4.18 release.

Change-Id: I7e90f457f3979781d06323ef1350d5fb05a6be43
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/69121
Reviewed-by: Elyes Haouas <ehaouas@noos.fr>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Diffstat (limited to 'src/cpu/x86/smm')
-rw-r--r--  src/cpu/x86/smm/Makefile.inc   |  14 -
-rw-r--r--  src/cpu/x86/smm/smihandler.c   | 207 -
-rw-r--r--  src/cpu/x86/smm/smm.ld         |  65 -
-rw-r--r--  src/cpu/x86/smm/smmhandler.S   | 258 -
-rw-r--r--  src/cpu/x86/smm/smmrelocate.S  | 163 -
5 files changed, 0 insertions, 707 deletions
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 5f695909a8..da93827274 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -76,17 +76,3 @@ $(obj)/smm/smm: $(obj)/smm/smm.elf.rmod
$(OBJCOPY_smm) -O binary $< $@
endif
-
-ifeq ($(CONFIG_SMM_LEGACY_ASEG),y)
-
-smm-y += smm.ld
-
-$(obj)/smm/smm: $(obj)/smm/smm.o $(call src-to-obj,smm,$(src)/cpu/x86/smm/smm.ld)
- $(LD_smm) $(LDFLAGS_smm) -o $(obj)/smm/smm.elf -T $(call src-to-obj,smm,$(src)/cpu/x86/smm/smm.ld) $(obj)/smm/smm.o
- $(NM_smm) -n $(obj)/smm/smm.elf | sort > $(obj)/smm/smm.map
- $(OBJCOPY_smm) -O binary $(obj)/smm/smm.elf $@
-
-smm-y += smmhandler.S
-smm-y += smihandler.c
-
-endif # CONFIG_SMM_LEGACY_ASEG
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c
deleted file mode 100644
index 7946e90aea..0000000000
--- a/src/cpu/x86/smm/smihandler.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <arch/io.h>
-#include <console/console.h>
-#include <commonlib/region.h>
-#include <cpu/x86/smm.h>
-#include <cpu/x86/smi_deprecated.h>
-#include <cpu/amd/amd64_save_state.h>
-#include <cpu/intel/em64t100_save_state.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/lapic_def.h>
-#include <cpu/x86/legacy_save_state.h>
-
-#if CONFIG(SPI_FLASH_SMM)
-#include <spi-generic.h>
-#endif
-
-static int do_driver_init = 1;
-
-typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
-
-/* SMI multiprocessing semaphore */
-static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status
- = SMI_UNLOCKED;
-
-static int smi_obtain_lock(void)
-{
- u8 ret = SMI_LOCKED;
-
- asm volatile (
- "movb %2, %%al\n"
- "xchgb %%al, %1\n"
- "movb %%al, %0\n"
- : "=g" (ret), "=m" (smi_handler_status)
- : "g" (SMI_LOCKED)
- : "eax"
- );
-
- return (ret == SMI_UNLOCKED);
-}
-
-static void smi_release_lock(void)
-{
- asm volatile (
- "movb %1, %%al\n"
- "xchgb %%al, %0\n"
- : "=m" (smi_handler_status)
- : "g" (SMI_UNLOCKED)
- : "eax"
- );
-}
-
-void io_trap_handler(int smif)
-{
- /* If a handler function handled a given IO trap, it
- * shall return a non-zero value
- */
- printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
-
- if (southbridge_io_trap_handler(smif))
- return;
-
- if (mainboard_io_trap_handler(smif))
- return;
-
- printk(BIOS_DEBUG, "Unknown function\n");
-}
-
-/**
- * @brief Set the EOS bit
- */
-static void smi_set_eos(void)
-{
- southbridge_smi_set_eos();
-}
-
-static u32 pci_orig;
-
-/**
- * @brief Back up the PCI address to make sure we do not mess up the OS
- */
-static void smi_backup_pci_address(void)
-{
- pci_orig = inl(0xcf8);
-}
-
-/**
- * @brief Restore PCI address previously backed up
- */
-static void smi_restore_pci_address(void)
-{
- outl(pci_orig, 0xcf8);
-}
-
-static inline void *smm_save_state(uintptr_t base, int arch_offset, int node)
-{
- base += SMM_SAVE_STATE_BEGIN(arch_offset) - (node * 0x400);
- return (void *)base;
-}
-
-/* This returns the SMM revision from the save state of CPU0,
-   which is assumed to be the same for all CPUs. See the memory
-   map in smmhandler.S */
-uint32_t smm_revision(void)
-{
- return *(uint32_t *)(SMM_BASE + SMM_ENTRY_OFFSET * 2 - SMM_REVISION_OFFSET_FROM_TOP);
-}
-
-void *smm_get_save_state(int cpu)
-{
- switch (smm_revision()) {
- case 0x00030002:
- case 0x00030007:
- return smm_save_state(SMM_BASE, SMM_LEGACY_ARCH_OFFSET, cpu);
- case 0x00030100:
- return smm_save_state(SMM_BASE, SMM_EM64T100_ARCH_OFFSET, cpu);
- case 0x00030101: /* SandyBridge, IvyBridge, and Haswell */
- return smm_save_state(SMM_BASE, SMM_EM64T101_ARCH_OFFSET, cpu);
- case 0x00020064:
- case 0x00030064:
- return smm_save_state(SMM_BASE, SMM_AMD64_ARCH_OFFSET, cpu);
- }
-
- return NULL;
-}
-
-bool smm_region_overlaps_handler(const struct region *r)
-{
- const struct region r_smm = {SMM_BASE, SMM_DEFAULT_SIZE};
-
- return region_overlap(&r_smm, r);
-}
-
-/**
- * @brief Interrupt handler for SMI#
- */
-
-void smi_handler(void)
-{
- unsigned int node;
-
- /* Are we ok to execute the handler? */
- if (!smi_obtain_lock()) {
- /* For security reasons we don't release the other CPUs
- * until the CPU with the lock is actually done
- */
- while (smi_handler_status == SMI_LOCKED) {
- asm volatile (
- ".byte 0xf3, 0x90\n" /* hint a CPU we are in
- * spinlock (PAUSE
- * instruction, REP NOP)
- */
- );
- }
- return;
- }
-
- smi_backup_pci_address();
-
- node = lapicid();
-
- console_init();
-
- printk(BIOS_SPEW, "\nSMI# #%d\n", node);
-
- /* Use smm_get_save_state() to see if the smm revision is supported */
- if (smm_get_save_state(node) == NULL) {
- printk(BIOS_WARNING, "smm_revision: 0x%08x\n", smm_revision());
- printk(BIOS_WARNING, "SMI# not supported on your CPU\n");
- /* Don't release the lock, so no further SMIs will happen;
- * we can't handle them anyway.
- */
- return;
- }
-
- /* Allow drivers to initialize variables in SMM context. */
- if (do_driver_init) {
-#if CONFIG(SPI_FLASH_SMM)
- spi_init();
-#endif
- do_driver_init = 0;
- }
-
- /* Call chipset specific SMI handlers. */
- southbridge_smi_handler();
-
- smi_restore_pci_address();
-
- smi_release_lock();
-
- /* De-assert SMI# signal to allow another SMI */
- smi_set_eos();
-}
-
-/* Provide a default implementation for all weak handlers so that relocation
- * entries in the modules make sense. Without default implementations the
- * weak relocations w/o a symbol have a 0 address which is where the modules
- * are linked at. */
-int __weak mainboard_io_trap_handler(int smif) { return 0; }
-void __weak southbridge_smi_handler(void) {}
-void __weak mainboard_smi_gpi(u32 gpi_sts) {}
-int __weak mainboard_smi_apmc(u8 data) { return 0; }
-void __weak mainboard_smi_sleep(u8 slp_typ) {}
-void __weak mainboard_smi_finalize(void) {}
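
For reference, the byte-wide xchg lock that smi_obtain_lock()/smi_release_lock()
implemented in inline assembly maps directly onto GCC's __atomic builtins. A
minimal sketch of the same serialization technique, assuming a hosted GCC/Clang
environment rather than the coreboot SMM build:

    #include <stdbool.h>

    typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;

    static volatile unsigned char smi_handler_status = SMI_UNLOCKED;

    /* Atomically swap SMI_LOCKED into the flag; the lock is ours iff the
     * previous value was SMI_UNLOCKED. This is what the xchgb achieved. */
    static bool smi_obtain_lock(void)
    {
            return __atomic_exchange_n(&smi_handler_status, SMI_LOCKED,
                                       __ATOMIC_ACQUIRE) == SMI_UNLOCKED;
    }

    /* A plain atomic store suffices for release; the original used another
     * xchgb, which has the same effect here. */
    static void smi_release_lock(void)
    {
            __atomic_store_n(&smi_handler_status, SMI_UNLOCKED,
                             __ATOMIC_RELEASE);
    }

Losing CPUs then spin on smi_handler_status with a PAUSE hint and return
without handling the SMI, exactly as the removed handler did.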
diff --git a/src/cpu/x86/smm/smm.ld b/src/cpu/x86/smm/smm.ld
deleted file mode 100644
index e232028e4b..0000000000
--- a/src/cpu/x86/smm/smm.ld
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/* Maximum number of CPUs/cores */
-CPUS = 4;
-
-_ = ASSERT(CPUS >= CONFIG_MAX_CPUS, "The ASEG SMM code only supports up to 4 CPUS");
-
-ENTRY(smm_handler_start);
-
-SECTIONS
-{
- /* This is the actual SMM handler.
- *
- * We just put code, rodata, data and bss all in a row.
- */
- . = 0xa0000;
- .handler (.): {
- _program = .;
- /* Assembler stub */
- *(.handler)
-
- /* C code of the SMM handler */
- *(.text);
- *(.text.*);
-
- /* C read-only data of the SMM handler */
- . = ALIGN(16);
- *(.rodata)
- *(.rodata.*)
-
- /* C read-write data of the SMM handler */
- . = ALIGN(4);
- *(.data)
- *(.data.*)
-
- /* C uninitialized data of the SMM handler */
- . = ALIGN(4);
- *(.bss)
- *(.bss.*)
- *(.sbss)
- *(.sbss.*)
-
- /* What is this? (Something we don't need with -fno-common.) */
- *(COMMON)
- . = ALIGN(4);
- _eprogram = .;
- }
-
- /* We are using the ASEG interleaved to stuff the SMM handlers
- * for all CPU cores in there. The jump table redirects the execution
- * to the actual SMM handler
- */
- . = 0xa8000 - (( CPUS - 1) * 0x400);
- .jumptable : {
- KEEP(*(.jumptable));
- }
-
- /DISCARD/ : {
- *(.comment)
- *(.note)
- *(.note.*)
- *(.eh_frame)
- *(.debug_*)
- }
-}
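
The arithmetic behind the .jumptable placement above is worth spelling out:
each core's SMM entry point is staggered downwards by 0x400 from core 0's
0xa8000, so the section must start at the lowest slot, 0xa8000 - (CPUS - 1)
* 0x400. A small hosted-C sketch of the layout this script produces
(constants taken from the script; the program itself is illustrative only):

    #include <stdio.h>

    #define CPUS 4
    #define SMM_ENTRY_0 0xa8000u    /* core 0 entry: SMBASE + 0x8000 */

    int main(void)
    {
            /* Lowest slot first: the linker places core 3's ljmp at the
             * start of .jumptable and core 0's at 0xa8000. */
            printf(".jumptable starts at 0x%x\n",
                   SMM_ENTRY_0 - (CPUS - 1) * 0x400);
            for (int core = 0; core < CPUS; core++)
                    printf("core %d enters SMM at 0x%x\n",
                           core, SMM_ENTRY_0 - core * 0x400);
            return 0;
    }

With CPUS = 4 this prints a section start of 0xa7400, matching the ordering
of the jump table in smmhandler.S (core 3 first, core 0 last at 0xa8000).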
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
deleted file mode 100644
index 9e7108ccf5..0000000000
--- a/src/cpu/x86/smm/smmhandler.S
+++ /dev/null
@@ -1,258 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/* NOTE: This handler assumes the SMM window goes from 0xa0000
- * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
- * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
- * So there is a lot of potential for growth in here. Let's stick
- * to 64k if we can though.
- */
-
-#include <cpu/x86/lapic_def.h>
-#include <cpu/x86/msr.h>
-
-/*
- * +--------------------------------+ 0xaffff
- * | Save State Map Node 0 |
- * | Save State Map Node 1 |
- * | Save State Map Node 2 |
- * | Save State Map Node 3 |
- * | ... |
- * +--------------------------------+ 0xaf000
- * | |
- * | |
- * | |
- * +--------------------------------+ 0xa8400
- * | SMM Entry Node 0 (+ stack) |
- * +--------------------------------+ 0xa8000
- * | SMM Entry Node 1 (+ stack) |
- * | SMM Entry Node 2 (+ stack) |
- * | SMM Entry Node 3 (+ stack) |
- * | ... |
- * +--------------------------------+ 0xa7400
- * | |
- * | SMM Handler |
- * | |
- * +--------------------------------+ 0xa0000
- *
- */
-
-/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
- * at which smm_handler_start lives. At the moment the handler
- * lives right at 0xa0000, so the offset is 0.
- */
-
-#define SMM_HANDLER_OFFSET 0x0000
-
-#if ENV_X86_64
-.bss
-ia32efer_backup_eax:
-.long 0
-ia32efer_backup_edx:
-.long 0
-#endif
-
-/* initially SMM is some sort of real mode. Let gcc know
- * how to treat the SMM handler stub
- */
-
-.section ".handler", "a", @progbits
-
-.code16
-
-/**
- * SMM code to enable protected mode and jump to the
- * C-written function void smi_handler(void)
- *
- * All the bad magic is not all that bad after all.
- */
-#define SMM_START 0xa0000
-#define SMM_END 0xb0000
-#if SMM_END <= SMM_START
-#error invalid SMM configuration
-#endif
-.global smm_handler_start
-smm_handler_start:
-#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
- /* Check if the LAPIC register block overlaps with SMM.
- * This block needs to work without data accesses because they
- * may be routed into the LAPIC register block.
- * Code accesses, on the other hand, are never routed to LAPIC,
- * which is what makes this work in the first place.
- */
- mov $LAPIC_BASE_MSR, %ecx
- rdmsr
- and $(~0xfff), %eax
- sub $(SMM_START), %eax
- cmp $(SMM_END - SMM_START), %eax
- ja untampered_lapic
-1:
- /* emit "Crash" on serial */
- mov $(CONFIG_TTYS0_BASE), %dx
- mov $'C', %al
- out %al, (%dx)
- mov $'r', %al
- out %al, (%dx)
- mov $'a', %al
- out %al, (%dx)
- mov $'s', %al
- out %al, (%dx)
- mov $'h', %al
- out %al, (%dx)
- /* now crash for real */
- ud2
-untampered_lapic:
-#endif
- movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
- lgdtl %cs:(%bx)
-
- movl %cr0, %eax
- andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
- orl $0x60000001, %eax /* CD, NW, PE = 1 */
- movl %eax, %cr0
-
- /* Enable protected mode */
- ljmpl $0x08, $1f
-
-.code32
-1:
- /* flush the cache after disabling it */
- wbinvd
-
- /* Use flat data segment */
- movw $0x10, %ax
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %ss
- xor %ax, %ax /* zero out the gs and fs segment index */
- movw %ax, %fs
- movw %ax, %gs /* Will be used for cpu_info */
-
- /* FIXME: Incompatible with X2APIC_SUPPORT. */
- /* Get this CPU's LAPIC ID */
- movl $(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
- movl (%esi), %ecx
- shr $24, %ecx
-
- /* This is an ugly hack, and we should find a way to read the CPU index
- * without relying on the LAPIC ID.
- */
-
- /* calculate stack offset by multiplying the APIC ID
- * by 1024 (0x400), and save that offset in ebp.
- */
- shl $10, %ecx
- movl %ecx, %ebp
-
- /* We put the stack for each core right above
- * its SMM entry point. Core 0 starts at 0xa8000,
- * we spare 0x10 bytes for the jump to be sure.
- */
- movl $0xa8010, %eax
- subl %ecx, %eax /* subtract offset, see above */
- movl %eax, %ebx /* Save bottom of stack in ebx */
-
-#define SMM_STACK_SIZE (0x400 - 0x10)
- /* clear stack */
- cld
- movl %eax, %edi
- movl $(SMM_STACK_SIZE >> 2), %ecx
- xorl %eax, %eax
- rep stosl
-
- /* set new stack */
- addl $SMM_STACK_SIZE, %ebx
- movl %ebx, %esp
-
-#if ENV_X86_64
- /* Backup IA32_EFER. Preserves ebx. */
- movl $(IA32_EFER), %ecx
- rdmsr
- movl %eax, ia32efer_backup_eax
- movl %edx, ia32efer_backup_edx
-
- /* Enable long mode. Preserves ebx. */
-#include <cpu/x86/64bit/entry64.inc>
-
-#endif
- /* Call C handler */
- call smi_handler
-
-#if ENV_X86_64
- /*
- * The only reason to go back to protected mode is that RSM doesn't restore
- * MSR registers and MSR IA32_EFER was modified by entering long mode.
- * Drop to protected mode to safely operate on the IA32_EFER MSR.
- */
-
- /* Disable long mode. */
- #include <cpu/x86/64bit/exit32.inc>
-
- /* Restore IA32_EFER as RSM doesn't restore MSRs. */
- movl $(IA32_EFER), %ecx
- movl ia32efer_backup_eax, %eax
- movl ia32efer_backup_edx, %edx
- wrmsr
-#endif
-
- /* To return, just do rsm. It will "clean up" protected mode */
- rsm
-
-.code16
-
-.align 4, 0xff
-
-smm_gdtptr16:
- .word smm_gdt_end - smm_gdt - 1
- .long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET
-
-.code32
-
-smm_gdt:
- /* The first GDT entry cannot be used. Keep it zero */
- .long 0x00000000, 0x00000000
-
- /* gdt selector 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
-
- /* gdt selector 0x10, flat data segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x93, 0xcf, 0x00
-
- /* gdt selector 0x18, flat code segment (64-bit) */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xaf, 0x00
-smm_gdt_end:
-
-
-.section ".jumptable", "a", @progbits
-
-/* This is the SMM jump table. All cores use the same SMM handler
- * for simplicity. But SMM Entry needs to be different due to the
- * save state area. The jump table makes sure all CPUs jump into the
- * real handler on SMM entry.
- */
-
-/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
- * shall be used, below table has to be updated, as well as smm.ld
- */
-
-/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
- * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
- * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
- */
-
-.code16
-jumptable:
- /* core 3 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 2 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 1 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 0 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
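
The stack carving in the entry stub above condenses to a little arithmetic:
each core gets a 0x400-byte slot just above its SMM entry point, with 0x10
bytes spared for the jump-table ljmp. A hosted-C sketch of the same
calculation (constants from the assembly; smm_stack_top() is an illustrative
name, not a coreboot function):

    #include <inttypes.h>
    #include <stdio.h>

    #define SMM_STACK_SIZE (0x400 - 0x10)

    /* Bottom of the stack is 0xa8010 minus the per-core offset; the
     * initial %esp is the bottom plus the stack size, as in the stub. */
    static uint32_t smm_stack_top(uint32_t lapicid)
    {
            uint32_t bottom = 0xa8010 - lapicid * 0x400;
            return bottom + SMM_STACK_SIZE;
    }

    int main(void)
    {
            for (uint32_t id = 0; id < 4; id++)
                    printf("core %" PRIu32 ": initial esp = 0x%" PRIx32 "\n",
                           id, smm_stack_top(id));
            return 0;
    }

For core 0 this yields an initial %esp of 0xa8400, the top of the "SMM Entry
Node 0 (+ stack)" region in the memory map comment.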
diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
deleted file mode 100644
index b89cf3d0b8..0000000000
--- a/src/cpu/x86/smm/smmrelocate.S
+++ /dev/null
@@ -1,163 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-// FIXME: Is this piece of code southbridge specific, or
-// can it be cleaned up so this include is not required?
-// It's needed right now because we get our DEFAULT_PMBASE from
-// here.
-#if CONFIG(SOUTHBRIDGE_INTEL_I82801IX)
-#include <southbridge/intel/i82801ix/i82801ix.h>
-#else
-#error "Southbridge needs SMM handler support."
-#endif
-
-// ADDR32() macro
-#include <arch/registers.h>
-
-#if !CONFIG(SMM_ASEG)
-#error "Only use this file with ASEG."
-#endif /* CONFIG_SMM_ASEG */
-
-#define LAPIC_ID 0xfee00020
-
-.global smm_relocation_start
-.global smm_relocation_end
-
-/* initially SMM is some sort of real mode. */
-.code16
-
-/**
- * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
- * this is not a good place for the SMM handler to live, so it needs to
- * be relocated.
- * Traditionally SMM handlers used to live in the A segment (0xa0000).
- * With growing SMM handlers, more CPU cores, etc., CPU vendors started
- * allowing the handler to be relocated to the end of physical memory,
- * which they refer to as TSEG.
- * This trampoline code relocates SMBASE to base address - ( lapicid * 0x400 )
- *
- * Why 0x400? It is a safe value to cover the save state area per CPU. On
- * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
- * Core 2 CPUs the _documented_ parts of the save state area are 48 bytes
- * bigger, so we effectively size our data structures at 0x300 bytes.
- *
- * Example (with SMM handler living at 0xa0000):
- *
- * LAPICID SMBASE SMM Entry SAVE STATE
- * 0 0xa0000 0xa8000 0xafd00
- * 1 0x9fc00 0xa7c00 0xaf900
- * 2 0x9f800 0xa7800 0xaf500
- * 3 0x9f400 0xa7400 0xaf100
- * 4 0x9f000 0xa7000 0xaed00
- * 5 0x9ec00 0xa6c00 0xae900
- * 6 0x9e800 0xa6800 0xae500
- * 7 0x9e400 0xa6400 0xae100
- * 8 0x9e000 0xa6000 0xadd00
- * 9 0x9dc00 0xa5c00 0xad900
- * 10 0x9d800 0xa5800 0xad500
- * 11 0x9d400 0xa5400 0xad100
- * 12 0x9d000 0xa5000 0xacd00
- * 13 0x9cc00 0xa4c00 0xac900
- * 14 0x9c800 0xa4800 0xac500
- * 15 0x9c400 0xa4400 0xac100
- * . . . .
- * . . . .
- * . . . .
- * 31 0x98400 0xa0400 0xa8100
- *
- * With 32 cores, the SMM handler would need to fit between
- * 0xa0000-0xa0400 and the stub plus stack would need to go
- * at 0xa8000-0xa8100 (example for core 0). That is not enough.
- *
- * This means we're basically limited to 16 CPU cores before
- * we need to move the SMM handler to TSEG.
- *
- * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
- * On those the above only works for up to 2 cores. But for now we only
- * care for Core (2) Duo/Solo.
- *
- */
-
-smm_relocation_start:
- /* Check revision to see if AMD64 style SMM_BASE
- * Intel Core Solo/Duo: 0x30007
- * Intel Core2 Solo/Duo: 0x30100
- * Intel SandyBridge: 0x30101
- * AMD64: 0x3XX64
- * This check does not make much sense, unless someone ports
- * SMI handling to AMD64 CPUs.
- */
-
- mov $0x38000 + 0x7efc, %ebx
- ADDR32(mov) (%ebx), %al
- cmp $0x64, %al
- je 1f
-
- mov $0x38000 + 0x7ef8, %ebx
- jmp smm_relocate
-1:
- mov $0x38000 + 0x7f00, %ebx
-
-smm_relocate:
- /* Get this CPU's LAPIC ID */
- movl $LAPIC_ID, %esi
- ADDR32(movl) (%esi), %ecx
- shr $24, %ecx
-
- /* calculate offset by multiplying the
- * APIC ID by 1024 (0x400)
- */
- movl %ecx, %edx
- shl $10, %edx
-
- movl $0xa0000, %eax
- subl %edx, %eax /* subtract offset, see above */
-
- ADDR32(movl) %eax, (%ebx)
-
- /* The next section of code is potentially southbridge specific */
-
- /* Clear SMI status */
- movw $(DEFAULT_PMBASE + 0x34), %dx
- inw %dx, %ax
- outw %ax, %dx
-
- /* Clear PM1 status */
- movw $(DEFAULT_PMBASE + 0x00), %dx
- inw %dx, %ax
- outw %ax, %dx
-
- /* Set EOS bit so other SMIs can occur */
- movw $(DEFAULT_PMBASE + 0x30), %dx
- inl %dx, %eax
- orl $(1 << 1), %eax
- outl %eax, %dx
-
- /* End of southbridge specific section. */
-
-#if CONFIG(DEBUG_SMM_RELOCATION)
- /* print [SMM-x] so we can determine if CPUx went to SMM */
- movw $CONFIG_TTYS0_BASE, %dx
- mov $'[', %al
- outb %al, %dx
- mov $'S', %al
- outb %al, %dx
- mov $'M', %al
- outb %al, %dx
- outb %al, %dx
- movb $'-', %al
- outb %al, %dx
- /* calculate ascii of CPU number. More than 9 cores? -> FIXME */
- movb %cl, %al
- addb $'0', %al
- outb %al, %dx
- mov $']', %al
- outb %al, %dx
- mov $'\r', %al
- outb %al, %dx
- mov $'\n', %al
- outb %al, %dx
-#endif
-
- /* That's it. return */
- rsm
-smm_relocation_end:
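
The relocation table in the comment block above follows directly from the
formula the code implements: SMBASE(lapicid) = 0xa0000 - lapicid * 0x400,
with the SMM entry point at SMBASE + 0x8000 and the save state at
SMBASE + 0xfd00. A hosted-C sketch that regenerates the table:

    #include <stdio.h>

    int main(void)
    {
            printf("LAPICID   SMBASE    SMM Entry  SAVE STATE\n");
            for (unsigned int id = 0; id < 16; id++) {
                    unsigned int smbase = 0xa0000 - id * 0x400;
                    printf("%7u   0x%05x   0x%05x    0x%05x\n",
                           id, smbase, smbase + 0x8000, smbase + 0xfd00);
            }
            return 0;
    }

Running it reproduces the rows above, e.g. LAPIC ID 15 gives SMBASE 0x9c400,
entry 0xa4400, and save state 0xac100.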