From 03a79520d6f62072ff3de75cc8bbbf0ff4876f62 Mon Sep 17 00:00:00 2001
From: Patrick Rudolph
Date: Sun, 29 Sep 2019 11:08:33 +0200
Subject: cpu/x86/smm: Add support for long mode

Enable long mode in the SMM handler. x86_32 isn't affected by this
change.

As the rsm instruction used to leave SMM doesn't restore MSRs, drop
back to protected mode after running smi_handler and restore the
IA32_EFER MSR (which enables long mode support) to its previous value.

NOTE: This commit does NOT introduce a new security model. It uses the
same page tables as the rest of the firmware. This can be a security
risk if someone is able to manipulate the page tables stored in ROM at
runtime. USE FOR TESTING ONLY!

Tested on Qemu Q35.

Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35681
Reviewed-by: Raul Rangel
Reviewed-by: Angel Pons
Tested-by: build bot (Jenkins)
---
 src/cpu/x86/smm/smmhandler.S | 46 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 45 insertions(+), 1 deletion(-)

(limited to 'src/cpu/x86/smm')

diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index 036bc83e2f..340840f685 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -8,6 +8,7 @@
  */
 
 #include <cpu/x86/lapic_def.h>
+#include <cpu/x86/msr.h>
 
 /*
  * +--------------------------------+ 0xaffff
@@ -42,6 +43,14 @@
 
 #define SMM_HANDLER_OFFSET 0x0000
 
+#if defined(__x86_64__)
+.bss
+ia32efer_backup_eax:
+.long
+ia32efer_backup_edx:
+.long
+#endif
+
 /* initially SMM is some sort of real mode. Let gcc know
  * how to treat the SMM handler stub
  */
@@ -159,12 +168,44 @@
 untampered_lapic:
 	/* Get SMM revision */
 	movl	$0xa8000 + 0x7efc, %ebx	/* core 0 address */
 	subl	%ebp, %ebx		/* subtract core X offset */
+
+#if defined(__x86_64__)
+	/* Backup IA32_EFER. Preserves ebx. */
+	movl	$(IA32_EFER), %ecx
+	rdmsr
+	movl	%eax, ia32efer_backup_eax
+	movl	%edx, ia32efer_backup_edx
+
+	/* Enable long mode. Preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+
+	mov	(%ebx), %rdi
+
+#else
 	movl	(%ebx), %eax
 	pushl	%eax
+#endif
 
-	/* Call 32bit C handler */
+	/* Call C handler */
 	call	smi_handler
 
+#if defined(__x86_64__)
+	/*
+	 * The only reason to go back to protected mode is that RSM doesn't restore
+	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
+	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
+	 */
+
+	/* Disable long mode. */
+	#include <src/cpu/x86/64bit/exit32.inc>
+
+	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
+	movl	$(IA32_EFER), %ecx
+	movl	ia32efer_backup_eax, %eax
+	movl	ia32efer_backup_edx, %edx
+	wrmsr
+#endif
+
 	/* To return, just do rsm. It will "clean up" protected mode */
 	rsm
 
@@ -190,6 +231,9 @@ smm_gdt:
	/* gdt selector 0x10, flat data segment */
 	.word	0xffff, 0x0000
 	.byte	0x00, 0x93, 0xcf, 0x00
 
+	/* gdt selector 0x18, flat code segment (64-bit) */
+	.word	0xffff, 0x0000
+	.byte	0x00, 0x9b, 0xaf, 0x00
 smm_gdt_end:
 
-- 
cgit v1.2.3
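
A note on the IA32_EFER handling above, with a sketch after it: rsm restores
the CPU register state saved in SMRAM on SMI entry, but it does not touch
MSRs, so a handler that turned on IA32_EFER.LME has to undo that itself
before executing rsm. Below is a minimal C sketch of that save/restore
logic, assuming coreboot's msr_t accessors from <cpu/x86/msr.h>; the two
function names are hypothetical, and the actual mode switches happen in the
included entry64.inc/exit32.inc assembly fragments, not in C.

#include <cpu/x86/msr.h>

/* Hypothetical illustration, not part of the patch. */
static msr_t ia32efer_backup;

/* Save IA32_EFER before entry64.inc sets IA32_EFER.LME,
   mirroring the rdmsr at the top of the 64-bit path. */
static void backup_ia32_efer(void)
{
	ia32efer_backup = rdmsr(IA32_EFER);
}

/* Restore the saved value once exit32.inc has dropped back to
   protected mode; rsm will not restore this MSR for us. */
static void restore_ia32_efer(void)
{
	wrmsr(IA32_EFER, ia32efer_backup);
}

The ordering matters: changing IA32_EFER.LME while paging is enabled
(CR0.PG = 1) raises #GP, which is why the handler leaves long mode via
exit32.inc first and only then performs the wrmsr.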
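
The new GDT entry also rewards a closer look: selector 0x18 is identical to
the 32-bit flat code segment except for its flags byte, 0xaf instead of
0xcf, which sets L = 1 (64-bit code segment) and clears D/B as long mode
requires. The standalone snippet below just decodes those two descriptor
bytes; everything in it beyond the two byte values from the patch is
illustrative scaffolding, not coreboot code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* From the patch: .byte 0x00, 0x9b, 0xaf, 0x00 */
	uint8_t access = 0x9b;	/* P=1, DPL=0, S=1, type=0xB: exec/read code, accessed */
	uint8_t flags  = 0xaf;	/* G=1, D/B=0, L=1, AVL=0, limit[19:16]=0xF */

	printf("present      P   = %u\n", (access >> 7) & 1);	/* 1 */
	printf("priv level   DPL = %u\n", (access >> 5) & 3);	/* 0 */
	printf("long mode    L   = %u\n", (flags >> 5) & 1);	/* 1 */
	printf("default size D/B = %u\n", (flags >> 6) & 1);	/* must be 0 when L=1 */
	printf("granularity  G   = %u\n", (flags >> 7) & 1);	/* 1: limit in 4 KiB units */
	return 0;
}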