author     Matt DeVillier <matt.devillier@gmail.com>    2023-10-14 18:55:20 -0500
committer  Felix Held <felix-coreboot@felixheld.de>     2023-10-25 20:20:34 +0000
commit     33aa2901f85b8a37b0984e378b405465e17f2ce6
tree       7f68ecb8de20920842014ac03d441c1d75ebab3f  /src/soc/amd/common/block/cpu/smm
parent     51d1f30d0eed4d594d351cc3c1d92f48f45bfb27
soc/amd/common/smm: Add option for late SMM locking
Pre-Zen SoCs like Stoneyridge call into an AGESA binary as part of S3
resume, which will fail if SMM is locked, causing the device to
(eventually) cold boot. To mitigate this, add a new Kconfig option to enable
"late" SMM locking, which restores the behavior from before
commit 43ed5d253422 ("cpu/amd: Move locking SMM as part of SMM init").
TEST=tested with rest of patch train
Change-Id: I9971814415271a6a107c327523a0a7c188a91df6
Signed-off-by: Matt DeVillier <matt.devillier@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/78352
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
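
Note: the CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING) guard used in the diff below is
defined in a Kconfig file outside the paths covered by this diffstat. The following
is only a rough sketch of how such an option could be declared and selected; the
symbol name is taken from the diff, while the file location, help text, and the
Stoneyridge select shown in the comment are assumptions, not part of this commit.

# Sketch only; assumed to live under src/soc/amd/common/block/cpu/Kconfig.
config SOC_AMD_COMMON_LATE_SMM_LOCKING
	bool
	help
	  Lock SMM from the boot-state finalize hook instead of during SMM
	  relocation, for pre-Zen SoCs whose AGESA S3 resume path fails once
	  SMM is already locked.

# A pre-Zen SoC such as Stoneyridge would then opt in from its own Kconfig, e.g.:
#   select SOC_AMD_COMMON_LATE_SMM_LOCKING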
Diffstat (limited to 'src/soc/amd/common/block/cpu/smm')
-rw-r--r--  src/soc/amd/common/block/cpu/smm/finalize.c      | 22
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_helper.c    | 14
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_relocate.c  | 15
3 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/src/soc/amd/common/block/cpu/smm/finalize.c b/src/soc/amd/common/block/cpu/smm/finalize.c
index b81b9bcd1d..a3ba188d9e 100644
--- a/src/soc/amd/common/block/cpu/smm/finalize.c
+++ b/src/soc/amd/common/block/cpu/smm/finalize.c
@@ -9,8 +9,30 @@
 #include <cpu/x86/msr.h>
 #include <types.h>
 
+static void late_smm_lock(void *unused)
+{
+	/* Finalize SMM settings */
+	if (is_smm_locked()) /* Skip if already locked, avoid GPF */
+		return;
+
+	if (CONFIG(HAVE_SMI_HANDLER))
+		tseg_valid();
+
+	lock_smm();
+}
+
+static void late_smm_finalize(void)
+{
+	printk(BIOS_SPEW, "Lock SMM configuration\n");
+	if (mp_run_on_all_cpus(late_smm_lock, NULL) != CB_SUCCESS)
+		printk(BIOS_WARNING, "Failed to finalize all cores\n");
+}
+
 static void soc_finalize(void *unused)
 {
+	if (CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING))
+		late_smm_finalize();
+
 	if (!acpi_is_wakeup_s3()) {
 		acpi_clear_pm_gpe_status();
diff --git a/src/soc/amd/common/block/cpu/smm/smm_helper.c b/src/soc/amd/common/block/cpu/smm/smm_helper.c
index a6e7d2dac5..e87d12c641 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_helper.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_helper.c
@@ -30,6 +30,20 @@ void clear_tvalid(void)
 	wrmsr(SMM_MASK_MSR, mask);
 }
 
+void tseg_valid(void)
+{
+	msr_t mask = rdmsr(SMM_MASK_MSR);
+	mask.lo |= SMM_TSEG_VALID;
+
+	wrmsr(SMM_MASK_MSR, mask);
+}
+
+bool is_smm_locked(void)
+{
+	msr_t hwcr = rdmsr(HWCR_MSR);
+	return hwcr.lo & SMM_LOCK ? true : false;
+}
+
 void lock_smm(void)
 {
 	msr_t hwcr = rdmsr(HWCR_MSR);
diff --git a/src/soc/amd/common/block/cpu/smm/smm_relocate.c b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
index ae008853bd..d01f4c3e2d 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_relocate.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
@@ -52,14 +52,6 @@ static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
 	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
 }
 
-static void tseg_valid(void)
-{
-	msr_t mask = rdmsr(SMM_MASK_MSR);
-	mask.lo |= SMM_TSEG_VALID;
-
-	wrmsr(SMM_MASK_MSR, mask);
-}
-
 static void smm_relocation_handler(void)
 {
 	uintptr_t tseg_base;
@@ -87,8 +79,11 @@ static void smm_relocation_handler(void)
 	};
 
 	wrmsr(SMM_BASE_MSR, smm_base);
-	tseg_valid();
-	lock_smm();
+
+	if (!CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING)) {
+		tseg_valid();
+		lock_smm();
+	}
 }
 
 static void post_mp_init(void)