Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/32bit/entry32.inc | 33 ----
-rw-r--r--  src/cpu/x86/Makefile.inc      |  1 +
-rw-r--r--  src/cpu/x86/entry32.S         | 79 ++++++++++
3 files changed, 80 insertions, 33 deletions
diff --git a/src/cpu/x86/32bit/entry32.inc b/src/cpu/x86/32bit/entry32.inc
deleted file mode 100644
index b28fa2f37e..0000000000
--- a/src/cpu/x86/32bit/entry32.inc
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/* For starting coreboot in protected mode */
-
-#include <arch/rom_segs.h>
-#include <cpu/x86/post_code.h>
-
-	.code32
-/*
- * When we come here we are in protected mode.
- * NOTE aligned to 4 so that we are sure that the prefetch
- * cache will be reloaded.
- */
-	.align	4
-
-.globl __protected_start
-__protected_start:
-	/* Save the BIST value */
-	movl	%eax, %ebp
-
-#if !CONFIG(NO_EARLY_BOOTBLOCK_POSTCODES)
-	post_code(POST_ENTER_PROTECTED_MODE)
-#endif
-
-	movw	$ROM_DATA_SEG, %ax
-	movw	%ax, %ds
-	movw	%ax, %es
-	movw	%ax, %ss
-	movw	%ax, %fs
-	movw	%ax, %gs
-
-	/* Restore the BIST value to %eax */
-	movl	%ebp, %eax
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index 9112ddbe1b..f1d41bd1b4 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -8,6 +8,7 @@ ramstage-y += backup_default_smm.c
 
 subdirs-$(CONFIG_CPU_INTEL_COMMON_SMM) += ../intel/smm
 
+bootblock-y += entry32.S
 bootblock-y += entry16.S
 bootblock-y += reset16.S
 
diff --git a/src/cpu/x86/entry32.S b/src/cpu/x86/entry32.S
new file mode 100644
index 0000000000..32f61ad261
--- /dev/null
+++ b/src/cpu/x86/entry32.S
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* For starting coreboot in protected mode */
+
+/*
+ * This is the modern bootblock. It prepares the system for C environment runtime
+ * setup. The actual setup is done by hardware-specific code.
+ *
+ * It provides a bootflow similar to other architectures, and thus is considered
+ * to be the modern approach.
+ *
+ */
+
+#include <arch/rom_segs.h>
+#include <cpu/x86/cr.h>
+#include <cpu/x86/post_code.h>
+
+.section .init, "ax", @progbits
+
+	.code32
+/*
+ * When we come here we are in protected mode.
+ * NOTE aligned to 4 so that we are sure that the prefetch
+ * cache will be reloaded.
+ */
+	.align	4
+
+.globl __protected_start
+__protected_start:
+	/* Save the BIST value */
+	movl	%eax, %ebp
+
+#if !CONFIG(NO_EARLY_BOOTBLOCK_POSTCODES)
+	post_code(POST_ENTER_PROTECTED_MODE)
+#endif
+
+	movw	$ROM_DATA_SEG, %ax
+	movw	%ax, %ds
+	movw	%ax, %es
+	movw	%ax, %ss
+	movw	%ax, %fs
+	movw	%ax, %gs
+
+	/* Restore the BIST value to %eax */
+	movl	%ebp, %eax
+
+#if CONFIG(BOOTBLOCK_DEBUG_SPINLOOP)
+
+	/* Wait for a JTAG debugger to break in and set EBX non-zero */
+	xor	%ebx, %ebx
+
+debug_spinloop:
+	cmp	$0, %ebx
+	jz	debug_spinloop
+#endif
+
+bootblock_protected_mode_entry:
+
+#if !CONFIG(USE_MARCH_586)
+	/* MMX registers required here */
+
+	/* BIST result in eax */
+	movd	%eax, %mm0
+
+	/* Get an early timestamp */
+	rdtsc
+	movd	%eax, %mm1
+	movd	%edx, %mm2
+#endif
+
+#if CONFIG(SSE)
+enable_sse:
+	mov	%cr4, %eax
+	or	$CR4_OSFXSR, %ax
+	mov	%eax, %cr4
+#endif /* CONFIG(SSE) */
+
+	/* We're done. Now it's up to platform-specific code */
+	jmp	bootblock_pre_c_entry
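The ROM_DATA_SEG selector written into %ds/%es/%ss/%fs/%gs comes from <arch/rom_segs.h> and indexes the GDT that the 16-bit reset code (entry16.S) installs before switching on protected mode. For orientation, the descriptors such a selector points at follow the standard flat-model layout; the table below is an illustrative sketch of that encoding (selector values 0x08/0x10 are the usual convention), not a copy of coreboot's actual GDT:

	/* Illustrative flat-model GDT; by convention 0x08 = code, 0x10 = data */
gdt_sketch:
	.word	0x0000, 0x0000, 0x0000, 0x0000	/* 0x00: mandatory null descriptor */
	.word	0xffff, 0x0000, 0x9b00, 0x00cf	/* 0x08: code, base 0, 4 GiB limit, 32-bit */
	.word	0xffff, 0x0000, 0x9300, 0x00cf	/* 0x10: data, base 0, 4 GiB limit */

With every data segment register pointing at the flat data descriptor, the early code can address all of memory without any segment arithmetic.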
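The BOOTBLOCK_DEBUG_SPINLOOP block parks the CPU at the earliest possible point so a JTAG probe can attach before any platform code runs. The loop only falls through once the debugger writes a non-zero value into %ebx from outside; with gdb attached over a JTAG stub that would be along the lines of `set $ebx = 1` followed by `continue`, though the exact incantation depends on the probe.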
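Because neither RAM nor cache-as-RAM is available yet, there is no stack, so the BIST result and the early rdtsc reading are parked in MMX registers (%mm0 for BIST, %mm1/%mm2 for the low/high timestamp halves), which survive until the platform's bootblock_pre_c_entry has brought up cache-as-RAM. A minimal sketch of what the receiving side might do with them, assuming a stack now exists and the stack-based asmlinkage convention coreboot uses for its 32-bit C entry points (treat the details as illustrative, not as the platform code itself):

	/* After CAR/stack setup inside bootblock_pre_c_entry: push the
	 * 64-bit base timestamp (high half first, cdecl order) and enter C. */
	movd	%mm2, %eax	/* high 32 bits of early TSC */
	push	%eax
	movd	%mm1, %eax	/* low 32 bits of early TSC */
	push	%eax
	call	bootblock_c_entry	/* void bootblock_c_entry(uint64_t base_timestamp) */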
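Enabling SSE here amounts to setting CR4.OSFXSR (bit 9, which is what the CR4_OSFXSR constant from <cpu/x86/cr.h> expands to); without it, SSE instructions raise #UD. The 16-bit `or $CR4_OSFXSR, %ax` works only because that bit sits in the low word of %eax; a full-width `orl $CR4_OSFXSR, %eax` would say the same thing more plainly.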