author | Arthur Heymans <arthur@aheymans.xyz> | 2022-05-19 11:31:10 +0200
committer | Felix Held <felix-coreboot@felixheld.de> | 2022-09-15 14:47:52 +0000
commit | 56776a1ab39333c791903e0a7e79e8fb51d3162d (patch)
tree | ffce5f6c84b0643cf6eaf0b288d2566d30796064 /src/cpu
parent | 576861994ea5011c3a836a826b8189ef79c366cb (diff)
soc/amd: Do SMM relocation via MSR
AMD CPUs have a convenient MSR that allows setting the SMBASE in the save
state without ever entering SMM (e.g. at the default 0x30000 address).
This has been a feature of all AMD CPUs since at least the AMD K8. It
allows relocation to be done in parallel in ramstage, without setting up a
relocation handler, which likely results in a speedup. The more cores
there are, the higher the speedup, since relocation previously happened
sequentially. On a 4-core AMD Picasso system this results in a 33 ms boot
speedup.
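As an illustration of the mechanism described above, here is a minimal sketch of an MSR-based SMBASE write, assuming the AMD SMM_BASE MSR at 0xC0010111 and coreboot's <cpu/x86/msr.h> wrmsr()/msr_t interface. The MSR number and the helper name set_smbase_via_msr() are assumptions for illustration, not code from this change.

```c
/* Minimal sketch, assuming the AMD SMM_BASE MSR lives at 0xC0010111.
 * Writing SMM_BASE moves this core's SMM save state without raising an SMI. */
#include <stdint.h>
#include <cpu/x86/msr.h>

#define SMM_BASE_MSR 0xc0010111 /* assumed AMD SMM_BASE MSR address */

void set_smbase_via_msr(uint32_t smbase)
{
	msr_t smm_base = {
		.lo = smbase, /* new SMBASE for this core's SMM save state */
		.hi = 0,
	};
	wrmsr(SMM_BASE_MSR, smm_base);
}
```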
TESTED on google/vilboz (Picasso) with CONFIG_SMI_DEBUG: verified that SMM
is correctly relocated and that the BSP correctly enters the SMI handler.
Change-Id: I9729fb94ed5c18cfd57b8098c838c08a04490e4b
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/64872
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Diffstat (limited to 'src/cpu')
-rw-r--r-- | src/cpu/x86/Kconfig | 9
-rw-r--r-- | src/cpu/x86/mp_init.c | 21
2 files changed, 24 insertions, 6 deletions
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 8d76638ff9..bd3be78b0c 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -18,6 +18,15 @@ config PARALLEL_MP_AP_WORK
 	  Allow APs to do other work after initialization instead of going
 	  to sleep.
 
+config X86_SMM_SKIP_RELOCATION_HANDLER
+	bool
+	default n
+	depends on PARALLEL_MP && HAVE_SMI_HANDLER
+	help
+	  Skip SMM relocation using a relocation handler running in SMM
+	  with a stub at 0x30000. This is useful on platforms that have
+	  an alternative way to set SMBASE.
+
 config LEGACY_SMP_INIT
 	bool
 
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 0d9cd418e8..2255841220 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -755,6 +755,9 @@ static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
 
 static enum cb_err install_relocation_handler(int num_cpus, size_t save_state_size)
 {
+	if (CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER))
+		return CB_SUCCESS;
+
 	struct smm_loader_params smm_params = {
 		.num_cpus = num_cpus,
 		.cpu_save_state_size = save_state_size,
@@ -1136,9 +1139,13 @@ static enum cb_err do_mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops
 	}
 
 	/* Sanity check SMM state. */
-	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
-	    mp_state.ops.relocation_handler != NULL)
-		smm_enable();
+	smm_enable();
+	if (mp_state.perm_smsize == 0)
+		smm_disable();
+	if (mp_state.smm_save_state_size == 0)
+		smm_disable();
+	if (!CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER) && mp_state.ops.relocation_handler == NULL)
+		smm_disable();
 
 	if (is_smm_enabled())
 		printk(BIOS_INFO, "Will perform SMM setup.\n");
@@ -1151,12 +1158,14 @@ static enum cb_err do_mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops
 	mp_params.flight_plan = &mp_steps[0];
 	mp_params.num_records = ARRAY_SIZE(mp_steps);
 
-	/* Perform backup of default SMM area. */
-	default_smm_area = backup_default_smm_area();
+	/* Perform backup of default SMM area when using SMM relocation handler. */
+	if (!CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER))
+		default_smm_area = backup_default_smm_area();
 
 	ret = mp_init(cpu_bus, &mp_params);
 
-	restore_default_smm_area(default_smm_area);
+	if (!CONFIG(X86_SMM_SKIP_RELOCATION_HANDLER))
+		restore_default_smm_area(default_smm_area);
 
 	/* Signal callback on success if it's provided. */
 	if (ret == CB_SUCCESS && mp_state.ops.post_mp_init != NULL)
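For context on how the new Kconfig option is meant to be consumed, the sketch below shows one plausible shape for a SoC that selects X86_SMM_SKIP_RELOCATION_HANDLER and relocates SMBASE on all cores in parallel from ramstage. mp_run_on_all_cpus() and cpu_index() are assumed to be available from coreboot's <cpu/x86/mp.h> and <cpu/cpu.h>; smm_get_cpu_smbase_for_core() is a hypothetical lookup helper and set_smbase_via_msr() is the sketch above, neither of which is this commit's API.

```c
/* Sketch only: parallel, SMI-free SMBASE relocation from ramstage on a
 * platform that selects X86_SMM_SKIP_RELOCATION_HANDLER. Helper names
 * below are hypothetical and used purely for illustration. */
#include <stdint.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>

uint32_t smm_get_cpu_smbase_for_core(unsigned int cpu); /* hypothetical lookup */
void set_smbase_via_msr(uint32_t smbase);               /* from the sketch above */

static void relocate_core_smbase(void *unused)
{
	(void)unused;
	/* Each core programs its own SMBASE; no sequential SMI rendezvous. */
	set_smbase_via_msr(smm_get_cpu_smbase_for_core(cpu_index()));
}

void soc_relocate_smm_without_smi(void)
{
	/* Run the callback on every core (BSP and APs). */
	mp_run_on_all_cpus(relocate_core_smbase, NULL);
}
```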