From 82501922b67e41e596447aee5b42bc4655a927f4 Mon Sep 17 00:00:00 2001
From: Aaron Durbin
Date: Fri, 29 Apr 2016 22:55:49 -0500
Subject: cpu/x86: combine multiprocessor and SMM initialization

In order to reduce code duplication provide a common flow through
callback functions that performs the multiprocessor and optionally
SMM initialization. The existing MP flight records are utilized but
a common flow is provided such that the chipset/cpu only needs to
provide an mp_ops structure which has callbacks to gather info and
provide hooks at certain points in the sequence. All current users
of the MP code can be switched over to this flow since there haven't
been any flight records that are overly complicated and long. After
the conversion has taken place most of the surface area of the MP
API can be hidden away within the compilation unit proper.

Change-Id: I6f70969631012982126f0d0d76e5fac6880c24f0
Signed-off-by: Aaron Durbin
Reviewed-on: https://review.coreboot.org/14557
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth
---
 src/cpu/x86/mp_init.c    | 242 +++++++++++++++++++++++++++++++++++++++++++++
 src/include/cpu/x86/mp.h |  96 ++++++++++++++++++++
 2 files changed, 338 insertions(+)

diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 2180d98e89..362cda3bbd 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -607,3 +607,245 @@ void smm_initiate_relocation(void)
 	smm_initiate_relocation_parallel();
 	spin_unlock(&smm_relocation_lock);
 }
+
+struct mp_state {
+	struct mp_ops ops;
+	int cpu_count;
+	uintptr_t perm_smbase;
+	size_t perm_smsize;
+	size_t smm_save_state_size;
+	int do_smm;
+} mp_state;
+
+static int is_smm_enabled(void)
+{
+	return IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) && mp_state.do_smm;
+}
+
+static void smm_disable(void)
+{
+	mp_state.do_smm = 0;
+}
+
+static void smm_enable(void)
+{
+	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER))
+		mp_state.do_smm = 1;
+}
+
+static void asmlinkage smm_do_relocation(void *arg)
+{
+	const struct smm_module_params *p;
+	const struct smm_runtime *runtime;
+	int cpu;
+	uintptr_t curr_smbase;
+	uintptr_t perm_smbase;
+
+	p = arg;
+	runtime = p->runtime;
+	cpu = p->cpu;
+	curr_smbase = runtime->smbase;
+
+	if (cpu >= CONFIG_MAX_CPUS) {
+		printk(BIOS_CRIT,
+		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
+		return;
+	}
+
+	/*
+	 * The permanent handler runs with all cpus concurrently. Precalculate
+	 * the location of the new SMBASE. If using SMM modules then this
+	 * calculation needs to match that of the module loader.
+	 */
+	perm_smbase = mp_state.perm_smbase;
+	perm_smbase -= cpu * runtime->save_state_size;
+
+	printk(BIOS_DEBUG, "New SMBASE 0x%08lx\n", perm_smbase);
+
+	/* Setup code checks this callback for validity. */
+	mp_state.ops.relocation_handler(cpu, curr_smbase, perm_smbase);
+}
+
+static void adjust_smm_apic_id_map(struct smm_loader_params *smm_params)
+{
+	int i;
+	struct smm_runtime *runtime = smm_params->runtime;
+
+	for (i = 0; i < CONFIG_MAX_CPUS; i++)
+		runtime->apic_id_to_cpu[i] = mp_get_apic_id(i);
+}
+
+static int install_relocation_handler(int num_cpus, size_t save_state_size)
+{
+	struct smm_loader_params smm_params = {
+		.per_cpu_stack_size = save_state_size,
+		.num_concurrent_stacks = num_cpus,
+		.per_cpu_save_state_size = save_state_size,
+		.num_concurrent_save_states = 1,
+		.handler = smm_do_relocation,
+	};
+
+	/* Allow callback to override parameters. */
+	if (mp_state.ops.adjust_smm_params != NULL)
+		mp_state.ops.adjust_smm_params(&smm_params, 0);
+
+	if (smm_setup_relocation_handler(&smm_params))
+		return -1;
+
+	adjust_smm_apic_id_map(&smm_params);
+
+	return 0;
+}
+
+static int install_permanent_handler(int num_cpus, uintptr_t smbase,
+				     size_t smsize, size_t save_state_size)
+{
+	/* There are num_cpus concurrent stacks and num_cpus concurrent save
+	 * state areas. Lastly, set the stack size to the save state size. */
+	struct smm_loader_params smm_params = {
+		.per_cpu_stack_size = save_state_size,
+		.num_concurrent_stacks = num_cpus,
+		.per_cpu_save_state_size = save_state_size,
+		.num_concurrent_save_states = num_cpus,
+	};
+
+	/* Allow callback to override parameters. */
+	if (mp_state.ops.adjust_smm_params != NULL)
+		mp_state.ops.adjust_smm_params(&smm_params, 1);
+
+	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);
+
+	if (smm_load_module((void *)smbase, smsize, &smm_params))
+		return -1;
+
+	adjust_smm_apic_id_map(&smm_params);
+
+	return 0;
+}
+
+/* Load SMM handlers as part of MP flight record. */
+static void load_smm_handlers(void)
+{
+	size_t smm_save_state_size = mp_state.smm_save_state_size;
+
+	/* Do nothing if SMM is disabled. */
+	if (!is_smm_enabled())
+		return;
+
+	/* Install handlers. */
+	if (install_relocation_handler(mp_state.cpu_count,
+				       smm_save_state_size) < 0) {
+		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
+		smm_disable();
+	}
+
+	if (install_permanent_handler(mp_state.cpu_count, mp_state.perm_smbase,
+				      mp_state.perm_smsize,
+				      smm_save_state_size) < 0) {
+		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
+		smm_disable();
+	}
+
+	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
+	wbinvd();
+
+	/*
+	 * Indicate that the SMM handlers have been loaded and MP
+	 * initialization is about to start.
+	 */
+	if (is_smm_enabled() && mp_state.ops.pre_mp_smm_init != NULL)
+		mp_state.ops.pre_mp_smm_init();
+}
+
+/* Trigger SMM as part of MP flight record. */
+static void trigger_smm_relocation(void)
+{
+	/* Do nothing if SMM is disabled. */
+	if (!is_smm_enabled() || mp_state.ops.per_cpu_smm_trigger == NULL)
+		return;
+	/* Trigger SMM mode for the currently running processor. */
+	mp_state.ops.per_cpu_smm_trigger();
+}
+
+static struct mp_flight_record mp_steps[] = {
+	/* Once the APs are up load the SMM handlers. */
+	MP_FR_BLOCK_APS(NULL, load_smm_handlers),
+	/* Perform SMM relocation. */
+	MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
+	/* Initialize each cpu through the driver framework. */
+	MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
+	/* Wait for APs to finish everything else then let them park. */
+	MP_FR_BLOCK_APS(NULL, NULL),
+};
+
+static void fill_mp_state(struct mp_state *state, const struct mp_ops *ops)
+{
+	/*
+	 * Make copy of the ops so that defaults can be set in the non-const
+	 * structure if needed.
+	 */
+	memcpy(&state->ops, ops, sizeof(*ops));
+
+	if (ops->get_cpu_count != NULL)
+		state->cpu_count = ops->get_cpu_count();
+
+	if (ops->get_smm_info != NULL)
+		ops->get_smm_info(&state->perm_smbase, &state->perm_smsize,
+				  &state->smm_save_state_size);
+
+	/*
+	 * Default to smm_initiate_relocation() if trigger callback isn't
+	 * provided.
+	 */
+	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER) &&
+	    ops->per_cpu_smm_trigger == NULL)
+		mp_state.ops.per_cpu_smm_trigger = smm_initiate_relocation;
+}
+
+int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops)
+{
+	int ret;
+	void *default_smm_area;
+	struct mp_params mp_params;
+
+	if (mp_ops->pre_mp_init != NULL)
+		mp_ops->pre_mp_init();
+
+	fill_mp_state(&mp_state, mp_ops);
+
+	memset(&mp_params, 0, sizeof(mp_params));
+
+	if (mp_state.cpu_count <= 0) {
+		printk(BIOS_ERR, "Invalid cpu_count: %d\n", mp_state.cpu_count);
+		return -1;
+	}
+
+	/* Sanity check SMM state. */
+	if (mp_state.perm_smsize != 0 && mp_state.smm_save_state_size != 0 &&
+	    mp_state.ops.relocation_handler != NULL)
+		smm_enable();
+
+	if (is_smm_enabled())
+		printk(BIOS_INFO, "Will perform SMM setup.\n");
+
+	mp_params.num_cpus = mp_state.cpu_count;
+	/* Gather microcode information. */
+	if (mp_state.ops.get_microcode_info != NULL)
+		mp_state.ops.get_microcode_info(&mp_params.microcode_pointer,
+						&mp_params.parallel_microcode_load);
+	mp_params.adjust_apic_id = mp_state.ops.adjust_cpu_apic_entry;
+	mp_params.flight_plan = &mp_steps[0];
+	mp_params.num_records = ARRAY_SIZE(mp_steps);
+
+	/* Perform backup of default SMM area. */
+	default_smm_area = backup_default_smm_area();
+
+	ret = mp_init(cpu_bus, &mp_params);
+
+	restore_default_smm_area(default_smm_area);
+
+	/* Signal callback on success if it's provided. */
+	if (ret == 0 && mp_state.ops.post_mp_init != NULL)
+		mp_state.ops.post_mp_init();
+
+	return ret;
+}
diff --git a/src/include/cpu/x86/mp.h b/src/include/cpu/x86/mp.h
index 3227975254..ff88a20301 100644
--- a/src/include/cpu/x86/mp.h
+++ b/src/include/cpu/x86/mp.h
@@ -17,6 +17,7 @@
 #define _X86_MP_H_
 
 #include <arch/smp/atomic.h>
+#include <cpu/x86/smm.h>
 
 #define CACHELINE_SIZE 64
 
@@ -78,6 +79,101 @@ struct mp_params {
 	int num_records;
 };
 
+/* The sequence of the callbacks is in calling order. */
+struct mp_ops {
+	/*
+	 * Optionally provide a callback prior to kicking off MP
+	 * startup. This callback is done prior to loading the SIPI
+	 * vector but after gathering the MP state information. Please
+	 * see the sequence below.
+	 */
+	void (*pre_mp_init)(void);
+	/*
+	 * Return the number of logical x86 execution contexts that
+	 * need to be brought out of SIPI state as well as have SMM
+	 * handlers installed.
+	 */
+	int (*get_cpu_count)(void);
+	/*
+	 * Optionally fill in permanent SMM region and save state size. If
+	 * this callback is not present no SMM handlers will be installed.
+	 * The perm_smsize is the size available to house the permanent SMM
+	 * handler.
+	 */
+	void (*get_smm_info)(uintptr_t *perm_smbase, size_t *perm_smsize,
+			     size_t *smm_save_state_size);
+	/*
+	 * Optionally fill in pointer to microcode and indicate if the APs
+	 * can load the microcode in parallel.
+	 */
+	void (*get_microcode_info)(const void **microcode, int *parallel);
+	/*
+	 * Optionally provide a function which adjusts the APIC id
+	 * map to cpu number. By default the cpu number and APIC id
+	 * are 1:1. To change the APIC id for a given cpu return the
+	 * new APIC id. It's called for each cpu as indicated by
+	 * get_cpu_count().
+	 */
+	int (*adjust_cpu_apic_entry)(int cpu, int cur_apic_id);
+	/*
+	 * Optionally adjust SMM handler parameters to override the default
+	 * values. The is_perm variable indicates if the parameters to adjust
+	 * are for the relocation handler or the permanent handler. This
+	 * function is therefore called twice -- once for each handler.
+	 * By default the parameters for each SMM handler are:
+	 *       stack_size       num_concurrent_stacks  num_concurrent_save_states
+	 * relo: save_state_size  get_cpu_count()        1
+	 * perm: save_state_size  get_cpu_count()        get_cpu_count()
+	 */
+	void (*adjust_smm_params)(struct smm_loader_params *slp, int is_perm);
+	/*
+	 * Optionally provide a callback prior to the APs starting SMM
+	 * relocation or cpu driver initialization. However, note that
+	 * this callback is called after SMM handlers have been loaded.
+	 */
+	void (*pre_mp_smm_init)(void);
+	/*
+	 * Optional function to use to trigger SMM to perform relocation. If
+	 * not provided, smm_initiate_relocation() is used.
+	 */
+	void (*per_cpu_smm_trigger)(void);
+	/*
+	 * This function is called while each cpu is in the SMM relocation
+	 * handler. Its primary purpose is to adjust the SMBASE for the
+	 * permanent handler. The parameters passed are the current cpu
+	 * running the relocation handler, current SMBASE of relocation handler,
+	 * and the pre-calculated staggered cpu SMBASE address of the permanent
+	 * SMM handler.
+	 */
+	void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
+				   uintptr_t staggered_smbase);
+	/*
+	 * Optionally provide a callback that is called after the APs
+	 * and the BSP have gone through the initialization sequence.
+	 */
+	void (*post_mp_init)(void);
+};
+
+/*
+ * mp_init_with_smm() returns < 0 on failure and 0 on success. The mp_ops
+ * argument is used to drive the multiprocessor initialization. Unless
+ * otherwise stated each callback is called on the BSP only. The sequence
+ * of operations is the following:
+ * 1. pre_mp_init()
+ * 2. get_cpu_count()
+ * 3. get_smm_info()
+ * 4. get_microcode_info()
+ * 5. adjust_cpu_apic_entry() for each number of get_cpu_count()
+ * 6. adjust_smm_params(is_perm=0)
+ * 7. adjust_smm_params(is_perm=1)
+ * 8. pre_mp_smm_init()
+ * 9. per_cpu_smm_trigger() in parallel for all cpus which calls
+ *    relocation_handler() in SMM.
+ * 10. mp_initialize_cpu() for each cpu
+ * 11. post_mp_init()
+ */
+int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
+
 /*
  * mp_init() will set up the SIPI vector and bring up the APs according to
  * mp_params. Each flight record will be executed according to the plan. Note
-- 
cgit v1.2.3
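
For orientation, a minimal usage sketch of the API added above (not part of the
commit itself): a chipset/cpu driver fills in a struct mp_ops and hands it to
mp_init_with_smm(). The platform_* functions and PLATFORM_* constants are
hypothetical placeholders; get_cpu_count() must report a positive count, and SMM
setup only takes place when get_smm_info() and relocation_handler() are also
provided (the per-cpu trigger defaults to smm_initiate_relocation()).

/*
 * Illustrative sketch only -- not part of this patch. The platform_*
 * functions and PLATFORM_* constants are hypothetical; struct mp_ops and
 * mp_init_with_smm() are the interfaces added by this commit.
 */
#include <console/console.h>
#include <cpu/x86/mp.h>
#include <device/device.h>

#define PLATFORM_TSEG_BASE		0xaf000000	/* hypothetical SMM region base */
#define PLATFORM_TSEG_SIZE		(8 * 1024 * 1024)
#define PLATFORM_SAVE_STATE_SIZE	0x400		/* hypothetical per-cpu save state size */

static int platform_get_cpu_count(void)
{
	/* Number of logical cpus to bring out of SIPI; must be positive. */
	return 4;
}

static void platform_get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
				  size_t *smm_save_state_size)
{
	/* A real driver would derive these from the chipset's TSEG setup. */
	*perm_smbase = PLATFORM_TSEG_BASE;
	*perm_smsize = PLATFORM_TSEG_SIZE;
	*smm_save_state_size = PLATFORM_SAVE_STATE_SIZE;
}

static void platform_relocation_handler(int cpu, uintptr_t curr_smbase,
					uintptr_t staggered_smbase)
{
	/* Runs inside SMM on each cpu; a real handler writes the staggered
	 * SMBASE into this cpu's save state area. */
	printk(BIOS_DEBUG, "cpu %d: SMBASE 0x%lx -> 0x%lx\n", cpu, curr_smbase,
	       staggered_smbase);
}

static const struct mp_ops platform_mp_ops = {
	.get_cpu_count = platform_get_cpu_count,
	.get_smm_info = platform_get_smm_info,
	.relocation_handler = platform_relocation_handler,
	/* Callbacks left NULL are optional; the SMM trigger defaults to
	 * smm_initiate_relocation(). */
};

void platform_init_cpus(struct bus *cpu_bus)
{
	/* Brings up the APs, relocates SMM and runs per-cpu init in one call. */
	if (mp_init_with_smm(cpu_bus, &platform_mp_ops) < 0)
		printk(BIOS_ERR, "MP initialization failure.\n");
}

A call like platform_init_cpus() would sit where a platform previously
open-coded its own mp_init() flight plan and SMM relocation sequence, which is
the duplication this change is meant to remove.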