From faa1118fc7d6d80d9c37bab8b9330325d8157466 Mon Sep 17 00:00:00 2001
From: Eugene Myers
Date: Thu, 6 Feb 2020 10:37:01 -0500
Subject: cpu/x86: Put guard around align for smm_save_state_size

The STM support aligns the smm_save_state_size. However, this creates
an issue for some platforms because this value is hard coded to 0x400.

Signed-off-by: Eugene D. Myers
Change-Id: Ia584f7e9b86405a12eb6cbedc3a2615a8727f69e
Reviewed-on: https://review.coreboot.org/c/coreboot/+/38734
Reviewed-by: Patrick Rudolph
Reviewed-by: Patrick Georgi
Reviewed-by: ron minnich
Tested-by: build bot (Jenkins)
---
 src/cpu/x86/mp_init.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 331f3b552a..c747207f7c 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -1044,17 +1044,22 @@ static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
 	/*
 	 * Make sure there is enough room for the SMM descriptor
 	 */
-	if (CONFIG(STM))
+	if (CONFIG(STM)) {
 		state->smm_save_state_size +=
 			sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR);
 
-	/* Currently, the CPU SMM save state size is based on a simplistic
-	 * algorithm. (align on 4K)
-	 * note: In the future, this will need to handle newer x86 processors
-	 * that require alignment of the save state on 32K boundaries.
-	 */
-	state->smm_save_state_size =
-		ALIGN_UP(state->smm_save_state_size, 0x1000);
+	/* Currently, the CPU SMM save state size is based on a simplistic
+	 * algorithm. (align on 4K)
+	 * note: In the future, this will need to handle newer x86 processors
+	 * that require alignment of the save state on 32K boundaries.
+	 * The alignment is done here because coreboot has a hard coded
+	 * value of 0x400 for this value.
+	 * Also, this alignment only works on CPUs less than 5 threads
+	 */
+	if (CONFIG(STM))
+		state->smm_save_state_size =
+			ALIGN_UP(state->smm_save_state_size, 0x1000);
+	}
 
 	/*
 	 * Default to smm_initiate_relocation() if trigger callback isn't
-- 
cgit v1.2.3
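
For readers looking at this change outside the full mp_init.c context, here is a minimal, self-contained sketch of the behavior the patch arrives at: the SMM descriptor reservation and the 4K alignment now apply only when STM is enabled, so the hard coded 0x400 save state size is left alone otherwise. This is not coreboot's code: ALIGN_UP is re-declared locally, CONFIG(STM) is replaced by a plain flag, and SMM_DESCRIPTOR_SIZE is a placeholder for sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR).

#include <stdio.h>
#include <stddef.h>

/* Local stand-ins; coreboot provides its own ALIGN_UP and CONFIG(STM). */
#define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define SMM_DESCRIPTOR_SIZE 0x80  /* placeholder for sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR) */

static size_t adjust_save_state_size(size_t smm_save_state_size, int stm_enabled)
{
	if (stm_enabled) {
		/* Reserve room for the SMM descriptor... */
		smm_save_state_size += SMM_DESCRIPTOR_SIZE;
		/* ...and only then align the result to a 4K boundary. */
		smm_save_state_size = ALIGN_UP(smm_save_state_size, 0x1000);
	}
	/* Without STM, the hard coded 0x400 save state size is untouched. */
	return smm_save_state_size;
}

int main(void)
{
	printf("STM off: %#zx\n", adjust_save_state_size(0x400, 0)); /* prints 0x400 */
	printf("STM on:  %#zx\n", adjust_save_state_size(0x400, 1)); /* prints 0x1000 */
	return 0;
}

Note that the committed hunk keeps an inner if (CONFIG(STM)) inside the newly added braces; since the outer guard already checks the same option, the sketch folds the two checks into one, which is behaviorally equivalent.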