author     Arthur Heymans <arthur@aheymans.xyz>        2022-05-31 22:00:13 +0200
committer  Felix Held <felix-coreboot@felixheld.de>    2022-06-17 15:27:21 +0000
commit     8cd1dfa4ae98a11300113582647552a242cf991b (patch)
tree       c52cbdacfdde571deccbef4e39c6f65aff365367 /src/soc/amd
parent     5b67ad0a5f7a1ba9d5a4dc46c7d2dec1665f00d7 (diff)
soc/amd/smm_relocate.c: Improve TSEG programming
TSEG does not need to be aligned to 128KiB but to its size, as the MSR
works like an MTRR. 128KiB is, however, the minimum TSEG size.

TESTED on google/vilboz.

Change-Id: I30854111bb47f0cb14b07f71cedacd629432e0f4
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/64865
Reviewed-by: Paul Menzel <paulepanter@mailbox.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Marshall Dawson <marshalldawson3rd@gmail.com>
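Note (not part of the commit message): the SMM_MASK MSR pairs with SMM_ADDR
the same way a variable MTRR mask pairs with its base: an address A lies in
TSEG when (A & mask) == (base & mask). That rule only describes the intended
region if the size is a power of two and the base is aligned to that size,
which is what the IS_ALIGNED check added below guards; the mask/base fields
of these MSRs only cover address bits 17 and up, hence the 128KiB minimum.
A minimal standalone sketch of the match rule (the helper name in_region is
hypothetical, not from the commit):

/* Illustration only: MTRR-style base/mask region match. */
#include <stdbool.h>
#include <stdint.h>

static bool in_region(uint64_t addr, uint64_t base, uint64_t size)
{
	uint64_t mask = ~(size - 1);	/* e.g. size 8 MiB -> mask ...ff800000 */

	/* Describes the whole region only if base is size-aligned and
	   size is a power of two. */
	return (addr & mask) == (base & mask);
}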
Diffstat (limited to 'src/soc/amd')
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_relocate.c   | 44
-rw-r--r--  src/soc/amd/common/block/include/amdblocks/smm.h  |  5
2 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/src/soc/amd/common/block/cpu/smm/smm_relocate.c b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
index 1d7cc68d68..f3fcc79a86 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_relocate.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
@@ -9,28 +9,25 @@
#include <cpu/x86/smm.h>
#include <types.h>
-static struct smm_relocation_params smm_reloc_params;
-
-static void fill_in_relocation_params(struct smm_relocation_params *params)
+void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size)
{
+ printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
+
uintptr_t tseg_base;
size_t tseg_size;
smm_region(&tseg_base, &tseg_size);
- params->tseg_base.lo = ALIGN_DOWN(tseg_base, 128 * KiB);
- params->tseg_base.hi = 0;
- params->tseg_mask.lo = ALIGN_DOWN(~(tseg_size - 1), 128 * KiB);
- params->tseg_mask.hi = ((1 << (cpu_phys_address_size() - 32)) - 1);
-
- params->tseg_mask.lo |= SMM_TSEG_WB;
-}
-
-void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size)
-{
- printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
+ if (!IS_ALIGNED(tseg_base, tseg_size)) {
+ printk(BIOS_ERR, "TSEG base not aligned to TSEG size\n");
+ return;
+ }
+ /* Minimum granularity for TSEG MSRs */
+ if (tseg_size < 128 * KiB) {
+ printk(BIOS_ERR, "TSEG size (0x%zx) too small\n", tseg_size);
+ return;
+ }
- fill_in_relocation_params(&smm_reloc_params);
smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
@@ -38,11 +35,22 @@ void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size)
void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
- struct smm_relocation_params *relo_params = &smm_reloc_params;
amd64_smm_state_save_area_t *smm_state;
- wrmsr(SMM_ADDR_MSR, relo_params->tseg_base);
- wrmsr(SMM_MASK_MSR, relo_params->tseg_mask);
+ uintptr_t tseg_base;
+ size_t tseg_size;
+
+ smm_region(&tseg_base, &tseg_size);
+
+ msr_t msr;
+ msr.lo = tseg_base;
+ msr.hi = 0;
+ wrmsr(SMM_ADDR_MSR, msr);
+
+ msr.lo = ~(tseg_size - 1);
+ msr.lo |= SMM_TSEG_WB;
+ msr.hi = (1 << (cpu_phys_address_size() - 32)) - 1;
+ wrmsr(SMM_MASK_MSR, msr);
smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
smm_state->smbase = staggered_smbase;
diff --git a/src/soc/amd/common/block/include/amdblocks/smm.h b/src/soc/amd/common/block/include/amdblocks/smm.h
index 4a44203ba9..91246073d5 100644
--- a/src/soc/amd/common/block/include/amdblocks/smm.h
+++ b/src/soc/amd/common/block/include/amdblocks/smm.h
@@ -6,11 +6,6 @@
#include <cpu/x86/msr.h>
#include <types.h>
-struct smm_relocation_params {
- msr_t tseg_base;
- msr_t tseg_mask;
-};
-
void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size);
void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase);
void *get_smi_source_handler(int source);
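For illustration, a hedged sketch (values assumed, not taken from the commit)
of what the relocation handler above ends up programming for an 8 MiB TSEG
based at 0xcf800000. It assumes the same headers as smm_relocate.c, i.e. the
definitions of msr_t, wrmsr(), cpu_phys_address_size(), MiB and the
SMM_ADDR_MSR/SMM_MASK_MSR/SMM_TSEG_WB macros used in the diff; the function
name is made up:

/*
 * Hypothetical worked example, not part of the commit.
 * 8 MiB TSEG at 0xcf800000, base aligned to its size.
 */
static void program_tseg_example(void)
{
	const uintptr_t tseg_base = 0xcf800000;
	const size_t tseg_size = 8 * MiB;
	msr_t msr;

	msr.lo = tseg_base;	/* SMM_ADDR: TSEG base */
	msr.hi = 0;
	wrmsr(SMM_ADDR_MSR, msr);

	/* SMM_MASK: ~(8 MiB - 1) = 0xff800000 in the low word, plus the
	   write-back type bits, with the upper physical address bits set. */
	msr.lo = ~(tseg_size - 1) | SMM_TSEG_WB;
	msr.hi = (1 << (cpu_phys_address_size() - 32)) - 1;
	wrmsr(SMM_MASK_MSR, msr);
}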