author		Matt DeVillier <matt.devillier@gmail.com>	2023-10-14 18:55:20 -0500
committer	Felix Held <felix-coreboot@felixheld.de>	2023-10-25 20:20:34 +0000
commit		33aa2901f85b8a37b0984e378b405465e17f2ce6
tree		7f68ecb8de20920842014ac03d441c1d75ebab3f /src/soc/amd/common/block
parent		51d1f30d0eed4d594d351cc3c1d92f48f45bfb27
soc/amd/common/smm: Add option for late SMM locking
Pre-Zen SoCs like Stoneyridge call into an AGESA binary as part of S3 resume, which will fail if SMM is locked, causing the device to (eventually) cold boot. To mitigate this, add a new Kconfig option to enable "late" SMM locking, which restores the behavior prior to commit 43ed5d253422 ("cpu/amd: Move locking SMM as part of SMM init").

TEST=tested with rest of patch train

Change-Id: I9971814415271a6a107c327523a0a7c188a91df6
Signed-off-by: Matt DeVillier <matt.devillier@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/78352
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src/soc/amd/common/block')
-rw-r--r--  src/soc/amd/common/block/cpu/Kconfig              |  9 +
-rw-r--r--  src/soc/amd/common/block/cpu/smm/finalize.c       | 22 +
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_helper.c     | 14 +
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_relocate.c   | 15 +-
-rw-r--r--  src/soc/amd/common/block/include/amdblocks/smm.h  |  2 +
5 files changed, 52 insertions(+), 10 deletions(-)
diff --git a/src/soc/amd/common/block/cpu/Kconfig b/src/soc/amd/common/block/cpu/Kconfig
index f926887cf5..1ffaece9dd 100644
--- a/src/soc/amd/common/block/cpu/Kconfig
+++ b/src/soc/amd/common/block/cpu/Kconfig
@@ -85,6 +85,15 @@ config SOC_AMD_COMMON_BLOCK_SMM
 	  Add common SMM relocation, finalization and handler functionality to
 	  the build.
 
+config SOC_AMD_COMMON_LATE_SMM_LOCKING
+	bool
+	depends on SOC_AMD_COMMON_BLOCK_SMM
+	help
+	  Select this option to perform SMM locking late in soc_finalize(), rather than earlier
+	  in smm_relocation_handler(). This is required for pre-Zen SoCs like Stoneyridge, which
+	  call into an AGESA binary as part of S3 resume and require SMM to still be unlocked
+	  at that time.
+
 config SOC_AMD_COMMON_BLOCK_SVI2
 	bool
 	help
diff --git a/src/soc/amd/common/block/cpu/smm/finalize.c b/src/soc/amd/common/block/cpu/smm/finalize.c
index b81b9bcd1d..a3ba188d9e 100644
--- a/src/soc/amd/common/block/cpu/smm/finalize.c
+++ b/src/soc/amd/common/block/cpu/smm/finalize.c
@@ -9,8 +9,30 @@
 #include <cpu/x86/msr.h>
 #include <types.h>
 
+static void late_smm_lock(void *unused)
+{
+	/* Finalize SMM settings */
+	if (is_smm_locked()) /* Skip if already locked, avoid GPF */
+		return;
+
+	if (CONFIG(HAVE_SMI_HANDLER))
+		tseg_valid();
+
+	lock_smm();
+}
+
+static void late_smm_finalize(void)
+{
+	printk(BIOS_SPEW, "Lock SMM configuration\n");
+	if (mp_run_on_all_cpus(late_smm_lock, NULL) != CB_SUCCESS)
+		printk(BIOS_WARNING, "Failed to finalize all cores\n");
+}
+
 static void soc_finalize(void *unused)
 {
+	if (CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING))
+		late_smm_finalize();
+
 	if (!acpi_is_wakeup_s3()) {
 		acpi_clear_pm_gpe_status();
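
Note: the hunk above ends where the web view truncates it. For orientation, soc_finalize() is not called directly; in coreboot it is registered as a boot-state callback via <bootstate.h>. A minimal sketch of that registration follows; the exact boot states are an assumption, not visible in this diff:

#include <bootstate.h>

/* Assumed registration: run soc_finalize() late on both the normal boot
 * and the S3 resume path, after MP init and SMM relocation have completed. */
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY, soc_finalize, NULL);
BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_BOOT, BS_ON_ENTRY, soc_finalize, NULL);

Under this ordering, deferring the lock to soc_finalize() is what lets the S3 path call into AGESA while SMM is still unlocked.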
diff --git a/src/soc/amd/common/block/cpu/smm/smm_helper.c b/src/soc/amd/common/block/cpu/smm/smm_helper.c
index a6e7d2dac5..e87d12c641 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_helper.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_helper.c
@@ -30,6 +30,20 @@ void clear_tvalid(void)
 	wrmsr(SMM_MASK_MSR, mask);
 }
 
+void tseg_valid(void)
+{
+	msr_t mask = rdmsr(SMM_MASK_MSR);
+	mask.lo |= SMM_TSEG_VALID;
+
+	wrmsr(SMM_MASK_MSR, mask);
+}
+
+bool is_smm_locked(void)
+{
+	msr_t hwcr = rdmsr(HWCR_MSR);
+	return hwcr.lo & SMM_LOCK ? true : false;
+}
+
 void lock_smm(void)
 {
 	msr_t hwcr = rdmsr(HWCR_MSR);
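
The hunk is cut off inside the pre-existing lock_smm(). To make its relationship to the new is_smm_locked() helper explicit: both operate on the same SMM_LOCK bit in HWCR_MSR. A sketch of the presumed remainder of the function (an assumption based on the lines shown, not part of this change):

void lock_smm(void)
{
	msr_t hwcr = rdmsr(HWCR_MSR);

	/* Presumed: set SMM_LOCK. Afterwards, writes to the SMM configuration
	 * MSRs raise a general protection fault, which is exactly what the
	 * is_smm_locked() check in late_smm_lock() guards against. */
	hwcr.lo |= SMM_LOCK;
	wrmsr(HWCR_MSR, hwcr);
}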
diff --git a/src/soc/amd/common/block/cpu/smm/smm_relocate.c b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
index ae008853bd..d01f4c3e2d 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_relocate.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
@@ -52,14 +52,6 @@ static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
 	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
 }
 
-static void tseg_valid(void)
-{
-	msr_t mask = rdmsr(SMM_MASK_MSR);
-	mask.lo |= SMM_TSEG_VALID;
-
-	wrmsr(SMM_MASK_MSR, mask);
-}
-
 static void smm_relocation_handler(void)
 {
 	uintptr_t tseg_base;
@@ -87,8 +79,11 @@ static void smm_relocation_handler(void)
 	};
 	wrmsr(SMM_BASE_MSR, smm_base);
-	tseg_valid();
-	lock_smm();
+
+	if (!CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING)) {
+		tseg_valid();
+		lock_smm();
+	}
 }
 
 static void post_mp_init(void)
 {
diff --git a/src/soc/amd/common/block/include/amdblocks/smm.h b/src/soc/amd/common/block/include/amdblocks/smm.h
index 97b11be609..bcd30f8ed2 100644
--- a/src/soc/amd/common/block/include/amdblocks/smm.h
+++ b/src/soc/amd/common/block/include/amdblocks/smm.h
@@ -11,6 +11,8 @@ void handle_smi_gsmi(void);
 void handle_smi_store(void);
 void fch_apmc_smi_handler(void);
 void clear_tvalid(void);
+void tseg_valid(void);
+bool is_smm_locked(void);
 void lock_smm(void);
 /* See SMITYPE_* for the list of possible events. GEVENTS are handled with mainboard_smi_gpi. */
 void mainboard_handle_smi(int event);
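
The two new declarations give late-running code a safe way to finish SMM configuration. A hypothetical caller (illustrative only, not part of this change) would mirror the guard used by late_smm_lock() above:

#include <amdblocks/smm.h>

/* Hypothetical example: seal SMM at most once. All SMM MSR writes must
 * happen before SMM_LOCK is set, or they fault. */
static void example_seal_smm(void)
{
	if (is_smm_locked())
		return;		/* already sealed; touching SMM MSRs now would GPF */

	tseg_valid();		/* mark the TSEG region valid in SMM_MASK_MSR */
	lock_smm();		/* set SMM_LOCK in HWCR_MSR; irreversible until reset */
}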