summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAngel Pons <th3fanbus@gmail.com>2020-10-20 23:03:45 +0200
committerAngel Pons <th3fanbus@gmail.com>2020-10-22 20:06:54 +0000
commit8f8cb95fe92170220ccf7ad44f22073fee561c40 (patch)
treea237f4f1aa620af8837ce1095004b965321b0417
parent01490258bb26a1bbb7b41e0cf8100ec4d42082cb (diff)
sec/intel/txt: Split MTRR setup ASM code into a macro
If necessary, SCLEAN needs to run in early romstage, where DRAM is not working yet. In fact, the DRAM not working is the very reason to run SCLEAN in the first place. Before running GETSEC, CAR needs to be torn down, as MTRRs have to be reprogrammed to cache the BIOS ACM. Further, running SCLEAN leaves the system in an undefined state, where the only sane thing to do is reset the platform. Thus, invoking SCLEAN requires specific assembly prologue and epilogue sections before and after MTRR setup, and neither DRAM nor CAR may be relied upon for the MTRR setup. In order to handle this without duplicating the MTRR setup code, place it in a macro on a separate file. This needs to be a macro because the call and return instructions rely on the stack being usable, and it is not the case for SCLEAN. The MTRR code clobbers many registers, but no other choice remains when the registers cannot be saved anywhere else. Tested on Asrock B85M Pro4, BIOS ACM can still be launched. Change-Id: I2f5e82f57b458ca1637790ddc1ddc14bba68ac49 Signed-off-by: Angel Pons <th3fanbus@gmail.com> Reviewed-on: https://review.coreboot.org/c/coreboot/+/46603 Reviewed-by: Arthur Heymans <arthur@aheymans.xyz> Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
-rw-r--r--src/security/intel/txt/getsec_enteraccs.S74
-rw-r--r--src/security/intel/txt/getsec_mtrr_setup.inc74
2 files changed, 84 insertions, 64 deletions
diff --git a/src/security/intel/txt/getsec_enteraccs.S b/src/security/intel/txt/getsec_enteraccs.S
index be038b0607..be3a1b4f0b 100644
--- a/src/security/intel/txt/getsec_enteraccs.S
+++ b/src/security/intel/txt/getsec_enteraccs.S
@@ -4,7 +4,7 @@
#include <cpu/x86/cr.h>
#include <cpu/x86/msr.h>
-#define MTRR_HIGH_MASK $((1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1)
+#include "getsec_mtrr_setup.inc"
.macro PUSH_MSR x
movl $(\x), %ecx
@@ -166,15 +166,6 @@ cond_clear_var_mtrrs:
* Intel TXT Software Development Guide (Document: 315168-015)
*/
- /*
- * Important note: The MTRRs must cache less than a page (4 KiB)
- * of unused memory after the BIOS ACM. Failure to do so will
- * result in a TXT reset with Class Code 5, Major Error Code 2.
- *
- * The caller must have checked that there are enough variable
- * MTRRs to cache the ACM size prior to invoking this routine.
- */
-
/* Determine size of AC module */
movl 12(%ebp), %eax /* %eax = acmbase */
movl $1, %ebx
@@ -191,60 +182,15 @@ cond_clear_var_mtrrs:
movd %eax, %xmm0 /* XMM0: Base address of next MTRR */
movd %ebx, %xmm1 /* XMM1: Remaining size to cache */
- /* Get the number of variable MTRRs */
- movl $(MTRR_CAP_MSR), %ecx
- rdmsr
- andl $(0xff), %eax
-
- /* Initialize ECX */
- movl $(MTRR_PHYS_BASE(0)), %ecx
-
- jmp cond_allocate_var_mtrrs
-
-body_allocate_var_mtrrs:
-
- /* Program MTRR base */
- xorl %edx, %edx
- movd %xmm0, %eax
- orl $(MTRR_TYPE_WRBACK), %eax
- wrmsr
- incl %ecx /* Move index to MTRR_PHYS_MASK */
-
- /* Temporarily transfer MSR index to EDX so that CL can be used */
- movl %ecx, %edx
-
- /* Determine next size to cache */
- bsr %ebx, %ecx
- movl $(1), %ebx
- shl %cl, %ebx /* Can only use CL here */
-
- /* Restore ECX */
- movl %edx, %ecx
-
- /* Update saved base address */
- addl %ebx, %eax
- movd %eax, %xmm0
-
- /* Update saved remaining size */
- movd %xmm1, %eax
- subl %ebx, %eax
- movd %eax, %xmm1
-
- /* Program MTRR mask */
- movl MTRR_HIGH_MASK, %edx
- xorl %eax, %eax
- subl %ebx, %eax /* %eax = 4GIB - size to cache */
- orl $(MTRR_PHYS_MASK_VALID), %eax
- wrmsr
- incl %ecx /* Move index to next MTRR_PHYS_BASE */
-
-cond_allocate_var_mtrrs:
-
- /* Check if we still need to cache something */
- movd %xmm1, %ebx
- andl %ebx, %ebx
-
- jnz body_allocate_var_mtrrs
+ /*
+ * Important note: The MTRRs must cache less than a page (4 KiB)
+ * of unused memory after the BIOS ACM. Failure to do so will
+ * result in a TXT reset with Class Code 5, Major Error Code 2.
+ *
+ * The caller must have checked that there are enough variable
+ * MTRRs to cache the ACM size prior to invoking this routine.
+ */
+ SET_UP_MTRRS_FOR_BIOS_ACM
/*
* Now that the variable MTRRs have been set up, enable them.
diff --git a/src/security/intel/txt/getsec_mtrr_setup.inc b/src/security/intel/txt/getsec_mtrr_setup.inc
new file mode 100644
index 0000000000..15e8cc17ad
--- /dev/null
+++ b/src/security/intel/txt/getsec_mtrr_setup.inc
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/msr.h>
+
+#define MTRR_HIGH_MASK $((1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1)
+
+/*
+ * Configure the MTRRs to cache the BIOS ACM. No general-purpose
+ * registers are preserved. Inputs are taken from SSE registers:
+ *
+ * %xmm0: BIOS ACM base
+ * %xmm1: BIOS ACM size
+ *
+ * These two SSE registers are not preserved, but the others are.
+ */
+.macro SET_UP_MTRRS_FOR_BIOS_ACM
+
+ /* Get the number of variable MTRRs */
+ movl $(MTRR_CAP_MSR), %ecx
+ rdmsr
+ andl $(0xff), %eax
+
+ /* Initialize ECX */
+ movl $(MTRR_PHYS_BASE(0)), %ecx
+
+ jmp cond_allocate_var_mtrrs
+
+body_allocate_var_mtrrs:
+
+ /* Program MTRR base */
+ xorl %edx, %edx
+ movd %xmm0, %eax
+ orl $(MTRR_TYPE_WRBACK), %eax
+ wrmsr
+ incl %ecx /* Move index to MTRR_PHYS_MASK */
+
+ /* Temporarily transfer MSR index to EDX so that CL can be used */
+ movl %ecx, %edx
+
+ /* Determine next size to cache */
+ bsr %ebx, %ecx
+ movl $(1), %ebx
+ shl %cl, %ebx /* Can only use CL here */
+
+ /* Restore ECX */
+ movl %edx, %ecx
+
+ /* Update saved base address */
+ addl %ebx, %eax
+ movd %eax, %xmm0
+
+ /* Update saved remaining size */
+ movd %xmm1, %eax
+ subl %ebx, %eax
+ movd %eax, %xmm1
+
+ /* Program MTRR mask */
+ movl MTRR_HIGH_MASK, %edx
+ xorl %eax, %eax
+ subl %ebx, %eax /* %eax = 4GIB - size to cache */
+ orl $(MTRR_PHYS_MASK_VALID), %eax
+ wrmsr
+ incl %ecx /* Move index to next MTRR_PHYS_BASE */
+
+cond_allocate_var_mtrrs:
+
+ /* Check if we still need to cache something */
+ movd %xmm1, %ebx
+ andl %ebx, %ebx
+
+ jnz body_allocate_var_mtrrs
+
+.endm