author    | Arthur Heymans <arthur@aheymans.xyz>    | 2021-03-02 16:07:52 +0100
committer | Patrick Georgi <pgeorgi@google.com>     | 2021-04-19 06:36:28 +0000
commit    | 88407bcd9d7f26e9a8c42f0592bd7163764c87e6 (patch)
tree      | 10da862fee24f850f8882facc70af7c13ce63bb2 /src/cpu
parent    | e6c3523b1b4691940312c083446fcaa59c5d55a4 (diff)
cpu/x86/smm: Drop the V1 smmloader
Change-Id: I536a104428ae86e82977f2510b9e76715398b442
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/51187
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Diffstat (limited to 'src/cpu')
-rw-r--r-- | src/cpu/qemu-x86/Kconfig            |  14
-rw-r--r-- | src/cpu/x86/Kconfig                 |   8
-rw-r--r-- | src/cpu/x86/mp_init.c               |  13
-rw-r--r-- | src/cpu/x86/smm/Makefile.inc        |   4
-rw-r--r-- | src/cpu/x86/smm/smm_module_loader.c | 396
5 files changed, 4 insertions, 431 deletions
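Of the five files in the diffstat, only mp_init.c keeps modified code; the other changes are pure removal. The surviving relocation path always asks the loader for each CPU's relocated SMBASE via smm_get_cpu_smbase() instead of deriving it from a single permanent base address, as the dropped V1 path did. Below is a minimal standalone sketch of the two calculations; the lookup table, base address and save state size are assumed stand-ins for coreboot's mp_state and smm_get_cpu_smbase(), not the real definitions.

/* Sketch of the relocation SMBASE choice before and after this change.
 * All values here are illustrative, not real coreboot configuration. */
#include <stdint.h>
#include <stdio.h>

#define SMM_SAVE_STATE_SIZE 0x400        /* assumed per-CPU save state size */
#define NUM_CPUS            4

static uintptr_t cpu_smbase[NUM_CPUS];   /* filled in by the loader (v2 style) */

/* Removed V1 path: derive each CPU's SMBASE from one permanent base. */
static uintptr_t smbase_v1(uintptr_t perm_smbase, int cpu)
{
	return perm_smbase - (uintptr_t)cpu * SMM_SAVE_STATE_SIZE;
}

/* Kept path: the loader records each CPU's SMBASE; relocation only looks it
 * up and treats 0 as "no SMBASE assigned". */
static uintptr_t smbase_v2(int cpu)
{
	return cpu_smbase[cpu];
}

int main(void)
{
	const uintptr_t perm_smbase = 0x7f000000;  /* made-up base address */

	/* Pretend the loader staggered the CPUs the same way V1 computed them. */
	for (int cpu = 0; cpu < NUM_CPUS; cpu++)
		cpu_smbase[cpu] = smbase_v1(perm_smbase, cpu);

	for (int cpu = 0; cpu < NUM_CPUS; cpu++)
		printf("CPU %d: SMBASE 0x%lx\n", cpu, (unsigned long)smbase_v2(cpu));
	return 0;
}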
diff --git a/src/cpu/qemu-x86/Kconfig b/src/cpu/qemu-x86/Kconfig
index d60f70fb48..85f99e9cb6 100644
--- a/src/cpu/qemu-x86/Kconfig
+++ b/src/cpu/qemu-x86/Kconfig
@@ -43,20 +43,6 @@ config CPU_QEMU_X86_TSEG_SMM
 
 endchoice
 
-choice
-	prompt "SMM loader"
-	default CPU_QEMU_X86_SMMLOADERV1
-	depends on SMM_TSEG
-
-config CPU_QEMU_X86_SMMLOADERV1
-	bool "smmloader v1"
-
-config CPU_QEMU_X86_SMMLOADERV2
-	bool "smmloader v2"
-	select X86_SMM_LOADER_VERSION2
-
-endchoice
-
 config MAX_CPUS
 	int
 	default 32 if SMM_TSEG
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index b3a16bcf63..5394cd023d 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -121,14 +121,6 @@ config SMM_STUB_STACK_SIZE
 
 endif
 
-config X86_SMM_LOADER_VERSION2
-	bool
-	default n
-	depends on HAVE_SMI_HANDLER
-	help
-	  This option enables SMM module loader that works with server
-	  platforms which may contain more than 32 CPU threads.
-
 config SMM_LAPIC_REMAP_MITIGATION
 	bool
 	default y if NORTHBRIDGE_INTEL_I945
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index bac74f9243..893e8f1fd6 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -757,15 +757,10 @@ static void asmlinkage smm_do_relocation(void *arg)
 	 * the location of the new SMBASE. If using SMM modules then this
 	 * calculation needs to match that of the module loader.
 	 */
-	if (CONFIG(X86_SMM_LOADER_VERSION2)) {
-		perm_smbase = smm_get_cpu_smbase(cpu);
-		if (!perm_smbase) {
-			printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
-			return;
-		}
-	} else {
-		perm_smbase = mp_state.perm_smbase;
-		perm_smbase -= cpu * mp_state.smm_save_state_size;
+	perm_smbase = smm_get_cpu_smbase(cpu);
+	if (!perm_smbase) {
+		printk(BIOS_ERR, "%s: bad SMBASE for CPU %d\n", __func__, cpu);
+		return;
 	}
 
 	/* Setup code checks this callback for validity. */
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 9d74558670..e0b48f746f 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -1,10 +1,6 @@
 ## SPDX-License-Identifier: GPL-2.0-only
 
-ifeq ($(CONFIG_X86_SMM_LOADER_VERSION2),y)
 ramstage-y += smm_module_loaderv2.c
-else
-ramstage-y += smm_module_loader.c
-endif
 ramstage-y += smi_trigger.c
 
 ifeq ($(CONFIG_ARCH_RAMSTAGE_X86_32),y)
diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c
deleted file mode 100644
index fae8742d11..0000000000
--- a/src/cpu/x86/smm/smm_module_loader.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <stdint.h>
-#include <string.h>
-#include <acpi/acpi_gnvs.h>
-#include <rmodule.h>
-#include <cpu/x86/smm.h>
-#include <commonlib/helpers.h>
-#include <console/console.h>
-#include <security/intel/stm/SmmStm.h>
-
-#define FXSAVE_SIZE 512
-
-/* FXSAVE area during relocation. While it may not be strictly needed the
-   SMM stub code relies on the FXSAVE area being non-zero to enable SSE
-   instructions within SMM mode. */
-static uint8_t fxsave_area_relocation[CONFIG_MAX_CPUS][FXSAVE_SIZE]
-__attribute__((aligned(16)));
-
-/*
- * Components that make up the SMRAM:
- * 1. Save state - the total save state memory used
- * 2. Stack - stacks for the CPUs in the SMM handler
- * 3. Stub - SMM stub code for calling into handler
- * 4. Handler - C-based SMM handler.
- *
- * The components are assumed to consist of one consecutive region.
- */
-
-/*
- * The stub is the entry point that sets up protected mode and stacks for each
- * CPU. It then calls into the SMM handler module. It is encoded as an rmodule.
- */
-extern unsigned char _binary_smmstub_start[];
-
-/* Per CPU minimum stack size. */
-#define SMM_MINIMUM_STACK_SIZE 32
-
-/*
- * The smm_entry_ins consists of 3 bytes. It is used when staggering SMRAM entry
- * addresses across CPUs.
- *
- * 0xe9 <16-bit relative target> ; jmp <relative-offset>
- */
-struct smm_entry_ins {
-	char jmp_rel;
-	uint16_t rel16;
-} __packed;
-
-/*
- * Place the entry instructions for num entries beginning at entry_start with
- * a given stride. The entry_start is the highest entry point's address. All
- * other entry points are stride size below the previous.
- */
-static void smm_place_jmp_instructions(void *entry_start, size_t stride,
-				       size_t num, void *jmp_target)
-{
-	size_t i;
-	char *cur;
-	struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
-
-	/* Each entry point has an IP value of 0x8000. The SMBASE for each
-	 * CPU is different so the effective address of the entry instruction
-	 * is different. Therefore, the relative displacement for each entry
-	 * instruction needs to be updated to reflect the current effective
-	 * IP. Additionally, the IP result from the jmp instruction is
-	 * calculated using the next instruction's address so the size of
-	 * the jmp instruction needs to be taken into account. */
-	cur = entry_start;
-	for (i = 0; i < num; i++) {
-		uint32_t disp = (uintptr_t)jmp_target;
-
-		disp -= sizeof(entry) + (uintptr_t)cur;
-		printk(BIOS_DEBUG,
-		       "SMM Module: placing jmp sequence at %p rel16 0x%04x\n",
-		       cur, disp);
-		entry.rel16 = disp;
-		memcpy(cur, &entry, sizeof(entry));
-		cur -= stride;
-	}
-}
-
-/* Place stacks in base -> base + size region, but ensure the stacks don't
- * overlap the staggered entry points. */
-static void *smm_stub_place_stacks(char *base, size_t size,
-				   struct smm_loader_params *params)
-{
-	size_t total_stack_size;
-	char *stacks_top;
-
-	if (params->stack_top != NULL)
-		return params->stack_top;
-
-	/* If stack space is requested assume the space lives in the lower
-	 * half of SMRAM. */
-	total_stack_size = params->per_cpu_stack_size *
-			   params->num_concurrent_stacks;
-
-	/* There has to be at least one stack user. */
-	if (params->num_concurrent_stacks < 1)
-		return NULL;
-
-	/* Total stack size cannot fit. */
-	if (total_stack_size > size)
-		return NULL;
-
-	/* Stacks extend down to SMBASE */
-	stacks_top = &base[total_stack_size];
-
-	return stacks_top;
-}
-
-/* Place the staggered entry points for each CPU. The entry points are
- * staggered by the per CPU SMM save state size extending down from
- * SMM_ENTRY_OFFSET. */
-static void smm_stub_place_staggered_entry_points(char *base,
-	const struct smm_loader_params *params, const struct rmodule *smm_stub)
-{
-	size_t stub_entry_offset;
-
-	stub_entry_offset = rmodule_entry_offset(smm_stub);
-
-	/* If there are staggered entry points or the stub is not located
-	 * at the SMM entry point then jmp instructions need to be placed. */
-	if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
-		size_t num_entries;
-
-		base += SMM_ENTRY_OFFSET;
-		num_entries = params->num_concurrent_save_states;
-		/* Adjust beginning entry and number of entries down since
-		 * the initial entry point doesn't need a jump sequence. */
-		if (stub_entry_offset == 0) {
-			base -= params->per_cpu_save_state_size;
-			num_entries--;
-		}
-		smm_place_jmp_instructions(base,
-					   params->per_cpu_save_state_size,
-					   num_entries,
-					   rmodule_entry(smm_stub));
-	}
-}
-
-/*
- * The stub setup code assumes it is completely contained within the
- * default SMRAM size (0x10000). There are potentially 3 regions to place
- * within the default SMRAM size:
- * 1. Save state areas
- * 2. Stub code
- * 3. Stack areas
- *
- * The save state and stack areas are treated as contiguous for the number of
- * concurrent areas requested. The save state always lives at the top of SMRAM
- * space, and the entry point is at offset 0x8000.
- */
-static int smm_module_setup_stub(void *smbase, size_t smm_size,
-				 struct smm_loader_params *params,
-				 void *fxsave_area)
-{
-	size_t total_save_state_size;
-	size_t smm_stub_size;
-	size_t stub_entry_offset;
-	char *smm_stub_loc;
-	void *stacks_top;
-	size_t size;
-	char *base;
-	size_t i;
-	struct smm_stub_params *stub_params;
-	struct rmodule smm_stub;
-
-	base = smbase;
-	size = SMM_DEFAULT_SIZE;
-
-	/* The number of concurrent stacks cannot exceed CONFIG_MAX_CPUS. */
-	if (params->num_concurrent_stacks > CONFIG_MAX_CPUS)
-		return -1;
-
-	/* Fail if can't parse the smm stub rmodule. */
-	if (rmodule_parse(&_binary_smmstub_start, &smm_stub))
-		return -1;
-
-	/* Adjust remaining size to account for save state. */
-	total_save_state_size = params->per_cpu_save_state_size *
-				params->num_concurrent_save_states;
-	if (total_save_state_size > size)
-		return -1;
-	size -= total_save_state_size;
-
-	/* The save state size encroached over the first SMM entry point. */
-	if (size <= SMM_ENTRY_OFFSET)
-		return -1;
-
-	/* Need a minimum stack size and alignment. */
-	if (params->per_cpu_stack_size <= SMM_MINIMUM_STACK_SIZE ||
-	    (params->per_cpu_stack_size & 3) != 0)
-		return -1;
-
-	smm_stub_loc = NULL;
-	smm_stub_size = rmodule_memory_size(&smm_stub);
-	stub_entry_offset = rmodule_entry_offset(&smm_stub);
-
-	/* Assume the stub is always small enough to live within upper half of
-	 * SMRAM region after the save state space has been allocated. */
-	smm_stub_loc = &base[SMM_ENTRY_OFFSET];
-
-	/* Adjust for jmp instruction sequence. */
-	if (stub_entry_offset != 0) {
-		size_t entry_sequence_size = sizeof(struct smm_entry_ins);
-		/* Align up to 16 bytes. */
-		entry_sequence_size = ALIGN_UP(entry_sequence_size, 16);
-		smm_stub_loc += entry_sequence_size;
-		smm_stub_size += entry_sequence_size;
-	}
-
-	/* Stub is too big to fit. */
-	if (smm_stub_size > (size - SMM_ENTRY_OFFSET))
-		return -1;
-
-	/* The stacks, if requested, live in the lower half of SMRAM space. */
-	size = SMM_ENTRY_OFFSET;
-
-	/* Ensure stacks don't encroach onto staggered SMM
-	 * entry points. The staggered entry points extend
-	 * below SMM_ENTRY_OFFSET by the number of concurrent
-	 * save states - 1 and save state size. */
-	if (params->num_concurrent_save_states > 1) {
-		size -= total_save_state_size;
-		size += params->per_cpu_save_state_size;
-	}
-
-	/* Place the stacks in the lower half of SMRAM. */
-	stacks_top = smm_stub_place_stacks(base, size, params);
-	if (stacks_top == NULL)
-		return -1;
-
-	/* Load the stub. */
-	if (rmodule_load(smm_stub_loc, &smm_stub))
-		return -1;
-
-	/* Place staggered entry points. */
-	smm_stub_place_staggered_entry_points(base, params, &smm_stub);
-
-	/* Setup the parameters for the stub code. */
-	stub_params = rmodule_parameters(&smm_stub);
-	stub_params->stack_top = (uintptr_t)stacks_top;
-	stub_params->stack_size = params->per_cpu_stack_size;
-	stub_params->c_handler = (uintptr_t)params->handler;
-	stub_params->fxsave_area = (uintptr_t)fxsave_area;
-	stub_params->fxsave_area_size = FXSAVE_SIZE;
-
-	/* Initialize the APIC id to CPU number table to be 1:1 */
-	for (i = 0; i < params->num_concurrent_stacks; i++)
-		stub_params->apic_id_to_cpu[i] = i;
-
-	/* Allow the initiator to manipulate SMM stub parameters. */
-	params->stub_params = stub_params;
-
-	printk(BIOS_DEBUG, "SMM Module: stub loaded at %p. Will call %p\n",
-	       smm_stub_loc, params->handler);
-
-	return 0;
-}
-
-/*
- * smm_setup_relocation_handler assumes the callback is already loaded in
- * memory. i.e. Another SMM module isn't chained to the stub. The other
- * assumption is that the stub will be entered from the default SMRAM
- * location: 0x30000 -> 0x40000.
- */
-int smm_setup_relocation_handler(void *const perm_smram, struct smm_loader_params *params)
-{
-	void *smram = (void *)SMM_DEFAULT_BASE;
-
-	/* There can't be more than 1 concurrent save state for the relocation
-	 * handler because all CPUs default to 0x30000 as SMBASE. */
-	if (params->num_concurrent_save_states > 1)
-		return -1;
-
-	/* A handler has to be defined to call for relocation. */
-	if (params->handler == NULL)
-		return -1;
-
-	/* Since the relocation handler always uses stack, adjust the number
-	 * of concurrent stack users to be CONFIG_MAX_CPUS. */
-	if (params->num_concurrent_stacks == 0)
-		params->num_concurrent_stacks = CONFIG_MAX_CPUS;
-
-	return smm_module_setup_stub(smram, SMM_DEFAULT_SIZE,
-				     params, fxsave_area_relocation);
-}
-
-/* The SMM module is placed within the provided region in the following
- * manner:
- * +-----------------+ <- smram + size
- * |  BIOS resource  |
- * |   list (STM)    |
- * +-----------------+ <- smram + size - CONFIG_BIOS_RESOURCE_LIST_SIZE
- * |     stacks      |
- * +-----------------+ <- .. - total_stack_size
- * |   fxsave area   |
- * +-----------------+ <- .. - total_stack_size - fxsave_size
- * |       ...       |
- * +-----------------+ <- smram + handler_size + SMM_DEFAULT_SIZE
- * |     handler     |
- * +-----------------+ <- smram + SMM_DEFAULT_SIZE
- * |    stub code    |
- * +-----------------+ <- smram
- *
- * It should be noted that this algorithm will not work for
- * SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
- * expects a region large enough to encompass the handler and stacks
- * as well as the SMM_DEFAULT_SIZE.
- */
-int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
-{
-	struct rmodule smm_mod;
-	struct smm_runtime *handler_mod_params;
-	size_t total_stack_size;
-	size_t handler_size;
-	size_t module_alignment;
-	size_t alignment_size;
-	size_t fxsave_size;
-	void *fxsave_area;
-	size_t total_size;
-	char *base;
-
-	if (size <= SMM_DEFAULT_SIZE)
-		return -1;
-
-	/* Fail if can't parse the smm rmodule. */
-	if (rmodule_parse(&_binary_smm_start, &smm_mod))
-		return -1;
-
-	/* Clear SMM region */
-	if (CONFIG(DEBUG_SMI))
-		memset(smram, 0xcd, size);
-
-	total_stack_size = params->per_cpu_stack_size *
-			   params->num_concurrent_stacks;
-
-	/* Stacks start at the top of the region. */
-	base = smram;
-	base += size;
-
-	if (CONFIG(STM))
-		base -= CONFIG_MSEG_SIZE + CONFIG_BIOS_RESOURCE_LIST_SIZE;
-
-	params->stack_top = base;
-
-	/* SMM module starts at offset SMM_DEFAULT_SIZE with the load alignment
-	 * taken into account. */
-	base = smram;
-	base += SMM_DEFAULT_SIZE;
-	handler_size = rmodule_memory_size(&smm_mod);
-	module_alignment = rmodule_load_alignment(&smm_mod);
-	alignment_size = module_alignment -
-			 ((uintptr_t)base % module_alignment);
-	if (alignment_size != module_alignment) {
-		handler_size += alignment_size;
-		base += alignment_size;
-	}
-
-	if (CONFIG(SSE)) {
-		fxsave_size = FXSAVE_SIZE * params->num_concurrent_stacks;
-		/* FXSAVE area below all the stacks stack. */
-		fxsave_area = params->stack_top;
-		fxsave_area -= total_stack_size + fxsave_size;
-	} else {
-		fxsave_size = 0;
-		fxsave_area = NULL;
-	}
-
-	/* Does the required amount of memory exceed the SMRAM region size? */
-	total_size = total_stack_size + handler_size;
-	total_size += fxsave_size + SMM_DEFAULT_SIZE;
-
-	if (total_size > size)
-		return -1;
-
-	if (rmodule_load(base, &smm_mod))
-		return -1;
-
-	params->handler = rmodule_entry(&smm_mod);
-	handler_mod_params = rmodule_parameters(&smm_mod);
-	handler_mod_params->smbase = (uintptr_t)smram;
-	handler_mod_params->smm_size = size;
-	handler_mod_params->save_state_size = params->real_cpu_save_state_size;
-	handler_mod_params->num_cpus = params->num_concurrent_stacks;
-	handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();
-
-	for (int i = 0; i < CONFIG_MAX_CPUS; i++) {
-		handler_mod_params->save_state_top[i] = (uintptr_t)smram + SMM_DEFAULT_SIZE -
-			params->per_cpu_save_state_size * i;
-	}
-
-	return smm_module_setup_stub(smram, size, params, fxsave_area);
-}
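The staggering scheme that smm_place_jmp_instructions() implemented in the deleted loader can be exercised outside of SMM. The sketch below places a 3-byte jmp rel16 at each staggered entry point, one save-state stride below the previous, with the displacement taken relative to the end of the instruction. SMRAM is an ordinary buffer here, and the entry offset, stride, CPU count and stub location are assumed example values rather than any board's configuration.

/* Standalone illustration of staggered SMM entry point placement. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMM_ENTRY_OFFSET  0x8000  /* each CPU enters SMM at SMBASE + 0x8000 */
#define SAVE_STATE_STRIDE 0x400   /* assumed per-CPU save state size */
#define NUM_CPUS          4

struct __attribute__((packed)) smm_entry_ins {
	uint8_t jmp_rel;   /* 0xe9: jmp with 16-bit relative displacement */
	uint16_t rel16;
};

/* Place one jmp at each staggered entry point, all targeting the stub. */
static void place_jmp_instructions(uint8_t *entry_start, size_t stride,
				   size_t num, const uint8_t *jmp_target)
{
	struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
	uint8_t *cur = entry_start;

	for (size_t i = 0; i < num; i++) {
		/* rel16 is relative to the end of the 3-byte instruction. */
		uint32_t disp = (uint32_t)(uintptr_t)jmp_target;
		disp -= sizeof(entry) + (uint32_t)(uintptr_t)cur;
		entry.rel16 = (uint16_t)disp;
		memcpy(cur, &entry, sizeof(entry));
		printf("entry %zu at %p -> rel16 0x%04x\n", i, (void *)cur, entry.rel16);
		cur -= stride;  /* the next CPU's entry point sits one stride lower */
	}
}

int main(void)
{
	static uint8_t smram[0x10000];  /* stand-in for the 64 KiB default SMRAM */
	const uint8_t *stub = &smram[SMM_ENTRY_OFFSET + 0x100];  /* pretend stub entry */

	place_jmp_instructions(&smram[SMM_ENTRY_OFFSET], SAVE_STATE_STRIDE,
			       NUM_CPUS, stub);
	return 0;
}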
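Likewise, smm_load_module() only accepted a region when the stacks, the optional FXSAVE areas, the handler and the SMM_DEFAULT_SIZE block all fit inside it. A standalone version of that fit check follows; the sizes are made-up examples, and the struct wrapping the inputs is purely illustrative rather than coreboot's smm_loader_params.

/* Sketch of the "does everything fit in SMRAM?" check from the dropped loader. */
#include <stddef.h>
#include <stdio.h>

#define SMM_DEFAULT_SIZE 0x10000  /* 64 KiB: stub, entry points, save states */
#define FXSAVE_SIZE      512

struct layout_in {
	size_t region_size;          /* size of the SMRAM region handed to the loader */
	size_t per_cpu_stack_size;
	size_t num_cpus;
	size_t handler_size;         /* handler rmodule size including alignment slack */
	int sse;                     /* FXSAVE areas are only reserved with SSE */
};

static int smm_region_fits(const struct layout_in *in)
{
	size_t total_stack_size = in->per_cpu_stack_size * in->num_cpus;
	size_t fxsave_size = in->sse ? FXSAVE_SIZE * in->num_cpus : 0;
	size_t total_size = total_stack_size + in->handler_size
			    + fxsave_size + SMM_DEFAULT_SIZE;

	return total_size <= in->region_size;
}

int main(void)
{
	struct layout_in example = {
		.region_size = 8 * 1024 * 1024,  /* assumed 8 MiB TSEG */
		.per_cpu_stack_size = 0x800,
		.num_cpus = 8,
		.handler_size = 0x20000,
		.sse = 1,
	};

	printf("module %s\n", smm_region_fits(&example) ? "fits" : "does not fit");
	return 0;
}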