-rw-r--r--  src/cpu/amd/model_10xxx/model_10xxx_init.c   13
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_init.c       16
-rw-r--r--  src/cpu/amd/smm/smm_init.c                  124
-rw-r--r--  src/include/cpu/amd/model_10xxx_msr.h         4
-rw-r--r--  src/include/cpu/amd/model_fxx_msr.h           4
-rw-r--r--  src/include/cpu/x86/smm.h                     3
6 files changed, 82 insertions(+), 82 deletions(-)
diff --git a/src/cpu/amd/model_10xxx/model_10xxx_init.c b/src/cpu/amd/model_10xxx/model_10xxx_init.c
index a92852f521..2e8bbfe09d 100644
--- a/src/cpu/amd/model_10xxx/model_10xxx_init.c
+++ b/src/cpu/amd/model_10xxx/model_10xxx_init.c
@@ -24,6 +24,7 @@
#include <device/pci.h>
#include <string.h>
#include <cpu/x86/msr.h>
+#include <cpu/x86/smm.h>
#include <cpu/x86/pae.h>
#include <pc80/mc146818rtc.h>
#include <cpu/x86/lapic.h>
@@ -118,7 +119,17 @@ static void model_10xxx_init(device_t dev)
msr.hi &= ~(1 << (35-32));
wrmsr(BU_CFG2_MSR, msr);
- /* Write protect SMM space with SMMLOCK. */
+ /* Set SMM base address for this CPU */
+ msr = rdmsr(SMM_BASE_MSR);
+ msr.lo = SMM_BASE - (lapicid() * 0x400);
+ wrmsr(SMM_BASE_MSR, msr);
+
+ /* Enable the SMM memory window */
+ msr = rdmsr(SMM_MASK_MSR);
+ msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+ wrmsr(SMM_MASK_MSR, msr);
+
+ /* Set SMMLOCK to avoid exploits messing with SMM */
msr = rdmsr(HWCR_MSR);
msr.lo |= (1 << 0);
wrmsr(HWCR_MSR, msr);
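
Note (not part of the patch): the staggered base computed above gives every core its own save state area. The SMI entry point sits at SMBASE + 0x8000 and the save state at SMBASE + 0xfe00, so lowering SMBASE by 0x400 per APIC ID keeps the save states from colliding while all cores still land in the shared handler inside ASEG. A minimal standalone sketch, assuming four cores:

#include <stdio.h>

#define SMM_BASE 0xa0000

int main(void)
{
	/* Each core gets a distinct 0x400-byte save state slot. */
	for (unsigned int apic_id = 0; apic_id < 4; apic_id++) {
		unsigned int smbase = SMM_BASE - apic_id * 0x400;
		printf("APIC %u: SMBASE %#x, entry %#x, save state %#x\n",
		       apic_id, smbase, smbase + 0x8000, smbase + 0xfe00);
	}
	return 0;
}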
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index 0608d0ae42..ce5c810821 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -24,6 +24,7 @@
#include <cpu/cpu.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
#include <cpu/amd/multicore.h>
#include <cpu/amd/model_fxx_msr.h>
@@ -547,6 +548,21 @@ static void model_fxx_init(device_t dev)
*/
if (id.coreid == 0)
init_ecc_memory(id.nodeid); // only do it for core 0
+
+ /* Set SMM base address for this CPU */
+ msr = rdmsr(SMM_BASE_MSR);
+ msr.lo = SMM_BASE - (lapicid() * 0x400);
+ wrmsr(SMM_BASE_MSR, msr);
+
+ /* Enable the SMM memory window */
+ msr = rdmsr(SMM_MASK_MSR);
+ msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+ wrmsr(SMM_MASK_MSR, msr);
+
+ /* Set SMMLOCK to avoid exploits messing with SMM */
+ msr = rdmsr(HWCR_MSR);
+ msr.lo |= (1 << 0);
+ wrmsr(HWCR_MSR, msr);
}
static struct device_operations cpu_dev_ops = {
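
Note (not part of the patch): once HWCR bit 0 (SMMLOCK) is set it can only be cleared by a reset, and further writes to the SMM base/mask MSRs are ignored, which is why the lock is taken last in both init paths above. A hedged helper sketch using coreboot's msr accessors; smm_is_locked() is hypothetical:

#include <cpu/x86/msr.h>
#include <cpu/amd/model_fxx_msr.h>

/* True once HWCR.SMMLOCK has been set; the SMM MSRs are then read-only. */
static int smm_is_locked(void)
{
	return rdmsr(HWCR_MSR).lo & (1 << 0);
}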
diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index ad1c112ed0..6398688c48 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -30,94 +30,56 @@
#include <cpu/x86/smm.h>
#include <string.h>
-#define SMM_BASE_MSR 0xc0010111
-#define SMM_ADDR_MSR 0xc0010112
-#define SMM_MASK_MSR 0xc0010113
-#define SMM_BASE 0xa0000
-
extern unsigned char _binary_smm_start;
extern unsigned char _binary_smm_size;
-static int smm_handler_copied = 0;
-
void smm_init(void)
{
- msr_t msr;
-
- msr = rdmsr(HWCR_MSR);
- if (msr.lo & (1 << 0)) {
- // This sounds like a bug... ?
- printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
- return;
- }
-
- /* Only copy SMM handler once, not once per CPU */
- if (!smm_handler_copied) {
- msr_t syscfg_orig, mtrr_aseg_orig;
-
- smm_handler_copied = 1;
-
- /* Back up MSRs for later restore */
- syscfg_orig = rdmsr(SYSCFG_MSR);
- mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
-
- /* MTRR changes don't like an enabled cache */
- disable_cache();
-
- msr = syscfg_orig;
- /* Allow changes to MTRR extended attributes */
- msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
- /* turn the extended attributes off until we fix
- * them so A0000 is routed to memory
- */
- msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
- wrmsr(SYSCFG_MSR, msr);
-
- /* set DRAM access to 0xa0000 */
- /* A0000 is memory */
- msr.lo = 0x18181818;
- msr.hi = 0x18181818;
- wrmsr(MTRRfix16K_A0000_MSR, msr);
-
- /* enable the extended features */
- msr = syscfg_orig;
- msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
- msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
- wrmsr(SYSCFG_MSR, msr);
-
- enable_cache();
- /* copy the real SMM handler */
- memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
- wbinvd();
-
- /* Restore MTRR */
- disable_cache();
-
- /* Restore SYSCFG */
- wrmsr(SYSCFG_MSR, syscfg_orig);
-
- wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
- enable_cache();
- }
-
-
- /* But set SMM base address on all CPUs/cores */
- msr = rdmsr(SMM_BASE_MSR);
- msr.lo = SMM_BASE - (lapicid() * 0x400);
- wrmsr(SMM_BASE_MSR, msr);
-
- /* enable the SMM memory window */
- msr = rdmsr(SMM_MASK_MSR);
- msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
- wrmsr(SMM_MASK_MSR, msr);
-
- /* Set SMMLOCK to avoid exploits messing with SMM */
- msr = rdmsr(HWCR_MSR);
- msr.lo |= (1 << 0);
- wrmsr(HWCR_MSR, msr);
+ msr_t msr, syscfg_orig, mtrr_aseg_orig;
+
+ /* Back up MSRs for later restore */
+ syscfg_orig = rdmsr(SYSCFG_MSR);
+ mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
+
+ /* MTRR changes don't like an enabled cache */
+ disable_cache();
+
+ msr = syscfg_orig;
+
+ /* Allow changes to MTRR extended attributes */
+ msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+ /* Turn the extended attributes off until they are set up,
+ * so that A0000 is routed to memory.
+ */
+ msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
+ wrmsr(SYSCFG_MSR, msr);
+
+ /* set DRAM access to 0xa0000 */
+ msr.lo = 0x18181818;
+ msr.hi = 0x18181818;
+ wrmsr(MTRRfix16K_A0000_MSR, msr);
+
+ /* enable the extended features */
+ msr = syscfg_orig;
+ msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
+ msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
+ wrmsr(SYSCFG_MSR, msr);
+
+ enable_cache();
+ /* copy the real SMM handler */
+ memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
+ wbinvd();
+ disable_cache();
+
+ /* Restore SYSCFG and MTRR */
+ wrmsr(SYSCFG_MSR, syscfg_orig);
+ wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
+ enable_cache();
+
+ /* CPU MSRs are set in CPU init */
}
void smm_lock(void)
{
- /* We lock SMM per CPU core */
+ /* We lock SMM in CPU init */
}
diff --git a/src/include/cpu/amd/model_10xxx_msr.h b/src/include/cpu/amd/model_10xxx_msr.h
index f497eb3a82..b34a2814cf 100644
--- a/src/include/cpu/amd/model_10xxx_msr.h
+++ b/src/include/cpu/amd/model_10xxx_msr.h
@@ -22,6 +22,10 @@
#include <cpu/x86/msr.h>
+#define SMM_BASE_MSR 0xC0010111
+#define SMM_ADDR_MSR 0xC0010112
+#define SMM_MASK_MSR 0xC0010113
+
#define HWCR_MSR 0xC0010015
#define NB_CFG_MSR 0xC001001f
#define LS_CFG_MSR 0xC0011020
diff --git a/src/include/cpu/amd/model_fxx_msr.h b/src/include/cpu/amd/model_fxx_msr.h
index b4795cbbb2..2ac2d4eb84 100644
--- a/src/include/cpu/amd/model_fxx_msr.h
+++ b/src/include/cpu/amd/model_fxx_msr.h
@@ -1,6 +1,10 @@
#ifndef CPU_AMD_MODEL_FXX_MSR_H
#define CPU_AMD_MODEL_FXX_MSR_H
+#define SMM_BASE_MSR 0xc0010111
+#define SMM_ADDR_MSR 0xc0010112
+#define SMM_MASK_MSR 0xc0010113
+
#define HWCR_MSR 0xC0010015
#define NB_CFG_MSR 0xC001001f
#define LS_CFG_MSR 0xC0011020
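
Note (not part of the patch): in SMM_MASK (0xc0010113), bit 0 validates the ASEG range at 0xa0000 and bit 1 the TSEG range; this patch uses only ASEG. A sketch of the enable step with hypothetical macro names, assuming coreboot's msr accessors:

#include <cpu/x86/msr.h>
#include <cpu/amd/model_fxx_msr.h>

#define SMM_MASK_AVALID (1 << 0) /* ASEG SMRAM range valid */
#define SMM_MASK_TVALID (1 << 1) /* TSEG SMRAM range valid (unused here) */

static void smm_enable_aseg(void)
{
	msr_t mask = rdmsr(SMM_MASK_MSR);
	mask.lo |= SMM_MASK_AVALID;
	wrmsr(SMM_MASK_MSR, mask);
}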
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index 49ee2be514..c314c3971a 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -24,6 +24,9 @@
#ifndef CPU_X86_SMM_H
#define CPU_X86_SMM_H
+/* used only by C programs so far */
+#define SMM_BASE 0xa0000
+
#include <types.h>
typedef struct {
u16 es_selector;