Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/Kconfig                 11
-rw-r--r--  src/cpu/x86/lapic/lapic_cpu_init.c  47
2 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 131cbf24bb..94225a3807 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -96,6 +96,17 @@ config SMM_LAPIC_REMAP_MITIGATION
default y if NORTHBRIDGE_INTEL_NEHALEM
default n
+config SERIALIZED_SMM_INITIALIZATION
+	bool
+	default n
+	help
+	  On some CPUs there is a race condition during SMM
+	  initialization: both hyperthreads may change SMM state
+	  variables in parallel without coordination, which can
+	  cause an ugly hang early in the boot process.
+	  Selecting this option serializes SMM initialization to
+	  avoid the hang, at the cost of a slightly longer boot time.
+
config X86_AMD_FIXED_MTRRS
bool
default n
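
As background for the SERIALIZED_SMM_INITIALIZATION help text above, the sketch below is plain user-space C with POSIX threads, not coreboot code. It shows the kind of uncoordinated read-modify-write on shared state that the option is meant to prevent; the variable name smm_state_word is purely illustrative.

/*
 * Illustrative only: two "hyperthreads" perform a read-modify-write on a
 * shared word.  Without coordination, updates are lost; with the lock held
 * (the serialized variant), the result is exact.
 */
#include <pthread.h>
#include <stdio.h>

static volatile unsigned int smm_state_word;	/* hypothetical shared SMM state */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int serialized;				/* 0 = racy, 1 = serialized */

static void *relocate_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		if (serialized)
			pthread_mutex_lock(&state_lock);
		smm_state_word = smm_state_word + 1;	/* non-atomic read-modify-write */
		if (serialized)
			pthread_mutex_unlock(&state_lock);
	}
	return NULL;
}

int main(void)
{
	for (serialized = 0; serialized <= 1; serialized++) {
		pthread_t a, b;
		smm_state_word = 0;
		pthread_create(&a, NULL, relocate_thread, NULL);
		pthread_create(&b, NULL, relocate_thread, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* The racy run usually prints less than 200000; the serialized run is exact. */
		printf("%s: %u\n", serialized ? "serialized" : "parallel", smm_state_word);
	}
	return 0;
}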
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
index ef150a50d7..bf63517384 100644
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ b/src/cpu/x86/lapic/lapic_cpu_init.c
@@ -458,6 +458,39 @@ static void start_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
}
+static void smm_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
+{
+	struct device *cpu;
+	int pre_count = atomic_read(&active_cpus);
+
+	/* Loop through the CPUs once to let each run the SMM relocator. */
+
+	for (cpu = cpu_bus->children; cpu; cpu = cpu->sibling) {
+		if (cpu->path.type != DEVICE_PATH_APIC)
+			continue;
+
+		printk(BIOS_DEBUG, "considering CPU 0x%02x for SMM init\n",
+			cpu->path.apic.apic_id);
+
+		if (cpu == bsp_cpu)
+			continue;
+
+		if (!cpu->enabled)
+			continue;
+
+		if (!start_cpu(cpu)) {
+			/* Record the error in cpu? */
+			printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
+				cpu->path.apic.apic_id);
+		}
+
+		/* FIXME: this spins forever if the AP never returns */
+		while (atomic_read(&active_cpus) != pre_count)
+			;
+	}
+}
+
static void wait_other_cpus_stop(struct bus *cpu_bus)
{
struct device *cpu;
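
The FIXME in the hunk above flags that the wait on active_cpus spins forever if an AP never finishes its relocation. A minimal sketch of a bounded wait follows; it assumes coreboot's atomic_read() and udelay() helpers, and the helper name and the 100 ms budget are made up and not part of this change.

/*
 * Sketch only: bound the wait on active_cpus instead of spinning forever.
 */
#include <console/console.h>
#include <delay.h>
#include <smp/atomic.h>

static int wait_for_cpu_smm_done(atomic_t *active, int pre_count)
{
	int timeout_us = 100 * 1000;	/* arbitrary 100 ms budget */

	while (atomic_read(active) != pre_count) {
		if (timeout_us-- <= 0) {
			printk(BIOS_ERR, "CPU did not finish SMM relocation\n");
			return -1;
		}
		udelay(1);
	}
	return 0;
}

In smm_other_cpus() the FIXME loop could then become wait_for_cpu_smm_done(&active_cpus, pre_count), with the caller deciding whether a timed-out AP is fatal or merely logged.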
@@ -528,7 +561,8 @@ void initialize_cpus(struct bus *cpu_bus)
#endif
#if CONFIG_HAVE_SMI_HANDLER
-	smm_init();
+	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
+		smm_init();
#endif
#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
@@ -547,4 +581,15 @@ void initialize_cpus(struct bus *cpu_bus)
/* Now wait the rest of the cpus stop*/
wait_other_cpus_stop(cpu_bus);
#endif
+
+	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
+		/* At this point all APs are sleeping:
+		 * smm_init() will queue a pending SMI on all CPUs
+		 * and smm_other_cpus() will start them one by one. */
+		smm_init();
+#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
+		last_cpu_index = 0;
+		smm_other_cpus(cpu_bus, info->cpu);
+#endif
+	}
}
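
The comment in the last hunk describes the intended ordering: smm_init() leaves an SMI pending on every CPU while the APs sleep, and smm_other_cpus() then wakes them one at a time. The following user-space model (plain C with POSIX threads, not coreboot code) illustrates that one-at-a-time handshake; all names and the thread count are made up.

/*
 * Illustrative model of the serialized flow: each "AP" waits for its turn,
 * does its work alone, and reports completion before the next one is released.
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_APS 3

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int current_turn = -1;	/* which AP may run its "relocation" */
static int done_count;

static void *ap_thread(void *arg)
{
	int id = *(int *)arg;

	pthread_mutex_lock(&lock);
	while (current_turn != id)		/* sleep until the BSP releases us */
		pthread_cond_wait(&cond, &lock);
	printf("AP %d: running SMM relocation alone\n", id);
	done_count++;
	pthread_cond_broadcast(&cond);		/* tell the BSP we are finished */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t ap[NUM_APS];
	int id[NUM_APS];

	for (int i = 0; i < NUM_APS; i++) {
		id[i] = i;
		pthread_create(&ap[i], NULL, ap_thread, &id[i]);
	}

	/* The "smm_other_cpus" role: release one AP at a time and wait for it. */
	for (int i = 0; i < NUM_APS; i++) {
		pthread_mutex_lock(&lock);
		current_turn = i;
		pthread_cond_broadcast(&cond);
		while (done_count != i + 1)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	for (int i = 0; i < NUM_APS; i++)
		pthread_join(ap[i], NULL);
	return 0;
}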