Diffstat (limited to 'src/soc/amd')
-rw-r--r--  src/soc/amd/cezanne/cpu.c                         | 35
-rw-r--r--  src/soc/amd/common/block/cpu/smm/smm_relocate.c   | 28
-rw-r--r--  src/soc/amd/common/block/include/amdblocks/smm.h  |  2
-rw-r--r--  src/soc/amd/mendocino/cpu.c                       | 34
-rw-r--r--  src/soc/amd/picasso/cpu.c                         | 38
-rw-r--r--  src/soc/amd/stoneyridge/cpu.c                     | 41
6 files changed, 38 insertions(+), 140 deletions(-)
diff --git a/src/soc/amd/cezanne/cpu.c b/src/soc/amd/cezanne/cpu.c
index 8b4e347331..8c15b55c40 100644
--- a/src/soc/amd/cezanne/cpu.c
+++ b/src/soc/amd/cezanne/cpu.c
@@ -1,57 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <acpi/acpi.h>
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
-#include <amdblocks/reset.h>
-#include <amdblocks/smm.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
-#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
-#include <acpi/acpi.h>
#include <device/device.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
-#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 16, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization */
-/*
- * Do essential initialization tasks before APs can be fired up -
- *
- * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
- * creates the MTRR solution that the APs will use. Otherwise APs will try to
- * apply the incomplete solution as the BSP is calculating it.
- */
-static void pre_mp_init(void)
-{
- const msr_t syscfg = rdmsr(SYSCFG_MSR);
- if (syscfg.lo & SYSCFG_MSR_TOM2WB)
- x86_setup_mtrrs_with_detect_no_above_4gb();
- else
- x86_setup_mtrrs_with_detect();
- x86_mtrr_check();
-}
-
-static const struct mp_ops mp_ops = {
- .pre_mp_init = pre_mp_init,
- .get_cpu_count = get_cpu_count,
- .get_smm_info = get_smm_info,
- .relocation_handler = smm_relocation_handler,
- .post_mp_init = global_smi_enable,
-};
-
void mp_init_cpus(struct bus *cpu_bus)
{
- if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
+ extern const struct mp_ops amd_mp_ops_with_smm;
+ if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
die_with_post_code(POST_HW_INIT_FAILURE,
"mp_init_with_smm failed. Halting.\n");
diff --git a/src/soc/amd/common/block/cpu/smm/smm_relocate.c b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
index f3fcc79a86..e464cfca6d 100644
--- a/src/soc/amd/common/block/cpu/smm/smm_relocate.c
+++ b/src/soc/amd/common/block/cpu/smm/smm_relocate.c
@@ -1,15 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <cpu/x86/mtrr.h>
+#include <cpu/x86/mp.h>
+#include <amdblocks/cpu.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/amd/msr.h>
+#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <types.h>
-void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size)
+/* AP MTRRs will be synced to the BSP in the SIPI vector so set them up before MP init. */
+static void pre_mp_init(void)
+{
+ const msr_t syscfg = rdmsr(SYSCFG_MSR);
+ if (syscfg.lo & SYSCFG_MSR_TOM2WB)
+ x86_setup_mtrrs_with_detect_no_above_4gb();
+ else
+ x86_setup_mtrrs_with_detect();
+ x86_mtrr_check();
+}
+
+static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
+ size_t *smm_save_state_size)
{
printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
@@ -33,7 +49,7 @@ void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_
*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
+static void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
amd64_smm_state_save_area_t *smm_state;
@@ -55,3 +71,11 @@ void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_
smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
smm_state->smbase = staggered_smbase;
}
+
+const struct mp_ops amd_mp_ops_with_smm = {
+ .pre_mp_init = pre_mp_init,
+ .get_cpu_count = get_cpu_count,
+ .get_smm_info = get_smm_info,
+ .relocation_handler = smm_relocation_handler,
+ .post_mp_init = global_smi_enable,
+};
diff --git a/src/soc/amd/common/block/include/amdblocks/smm.h b/src/soc/amd/common/block/include/amdblocks/smm.h
index 7fa8648ba6..f0a06bf015 100644
--- a/src/soc/amd/common/block/include/amdblocks/smm.h
+++ b/src/soc/amd/common/block/include/amdblocks/smm.h
@@ -6,8 +6,6 @@
#include <cpu/x86/msr.h>
#include <types.h>
-void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size);
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase);
void *get_smi_source_handler(int source);
void handle_smi_gsmi(void);
void handle_smi_store(void);
diff --git a/src/soc/amd/mendocino/cpu.c b/src/soc/amd/mendocino/cpu.c
index a0aa05ccbd..bee4207600 100644
--- a/src/soc/amd/mendocino/cpu.c
+++ b/src/soc/amd/mendocino/cpu.c
@@ -6,55 +6,25 @@
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
-#include <amdblocks/reset.h>
-#include <amdblocks/smm.h>
-#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
-#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
#include <acpi/acpi.h>
#include <device/device.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
-#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 8, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization */
-/*
- * Do essential initialization tasks before APs can be fired up -
- *
- * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
- * creates the MTRR solution that the APs will use. Otherwise APs will try to
- * apply the incomplete solution as the BSP is calculating it.
- */
-static void pre_mp_init(void)
-{
- const msr_t syscfg = rdmsr(SYSCFG_MSR);
- if (syscfg.lo & SYSCFG_MSR_TOM2WB)
- x86_setup_mtrrs_with_detect_no_above_4gb();
- else
- x86_setup_mtrrs_with_detect();
- x86_mtrr_check();
-}
-
-static const struct mp_ops mp_ops = {
- .pre_mp_init = pre_mp_init,
- .get_cpu_count = get_cpu_count,
- .get_smm_info = get_smm_info,
- .relocation_handler = smm_relocation_handler,
- .post_mp_init = global_smi_enable,
-};
-
void mp_init_cpus(struct bus *cpu_bus)
{
- if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
+ extern const struct mp_ops amd_mp_ops_with_smm;
+ if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
die_with_post_code(POST_HW_INIT_FAILURE,
"mp_init_with_smm failed. Halting.\n");
diff --git a/src/soc/amd/picasso/cpu.c b/src/soc/amd/picasso/cpu.c
index 08447e9be3..47c71ab139 100644
--- a/src/soc/amd/picasso/cpu.c
+++ b/src/soc/amd/picasso/cpu.c
@@ -4,58 +4,24 @@
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
-#include <amdblocks/reset.h>
-#include <amdblocks/smm.h>
-#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
-#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
#include <device/device.h>
-#include <device/pci_ops.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
-#include <soc/pci_devs.h>
-#include <soc/smi.h>
-#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 8, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization. */
-/*
- * Do essential initialization tasks before APs can be fired up -
- *
- * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
- * creates the MTRR solution that the APs will use. Otherwise APs will try to
- * apply the incomplete solution as the BSP is calculating it.
- */
-static void pre_mp_init(void)
-{
- const msr_t syscfg = rdmsr(SYSCFG_MSR);
- if (syscfg.lo & SYSCFG_MSR_TOM2WB)
- x86_setup_mtrrs_with_detect_no_above_4gb();
- else
- x86_setup_mtrrs_with_detect();
- x86_mtrr_check();
-}
-
-static const struct mp_ops mp_ops = {
- .pre_mp_init = pre_mp_init,
- .get_cpu_count = get_cpu_count,
- .get_smm_info = get_smm_info,
- .relocation_handler = smm_relocation_handler,
- .post_mp_init = global_smi_enable,
-};
-
void mp_init_cpus(struct bus *cpu_bus)
{
- if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
+ extern const struct mp_ops amd_mp_ops_with_smm;
+ if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
die_with_post_code(POST_HW_INIT_FAILURE,
"mp_init_with_smm failed. Halting.\n");
diff --git a/src/soc/amd/stoneyridge/cpu.c b/src/soc/amd/stoneyridge/cpu.c
index e8519f8eae..8e44ede39e 100644
--- a/src/soc/amd/stoneyridge/cpu.c
+++ b/src/soc/amd/stoneyridge/cpu.c
@@ -4,56 +4,25 @@
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
-#include <amdblocks/smm.h>
-#include <console/console.h>
#include <cpu/amd/msr.h>
-#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
-#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
-#include <cpu/x86/smm.h>
+#include <cpu/x86/msr.h>
#include <device/device.h>
#include <device/pci_ops.h>
+#include <soc/pci_devs.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
-#include <soc/northbridge.h>
-#include <soc/pci_devs.h>
-#include <soc/smi.h>
-#include <types.h>
+#include <console/console.h>
/*
* MP and SMM loading initialization.
*/
-
-/*
- * Do essential initialization tasks before APs can be fired up -
- *
- * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
- * creates the MTRR solution that the APs will use. Otherwise APs will try to
- * apply the incomplete solution as the BSP is calculating it.
- */
-static void pre_mp_init(void)
-{
- const msr_t syscfg = rdmsr(SYSCFG_MSR);
- if (syscfg.lo & SYSCFG_MSR_TOM2WB)
- x86_setup_mtrrs_with_detect_no_above_4gb();
- else
- x86_setup_mtrrs_with_detect();
- x86_mtrr_check();
-}
-
-static const struct mp_ops mp_ops = {
- .pre_mp_init = pre_mp_init,
- .get_cpu_count = get_cpu_count,
- .get_smm_info = get_smm_info,
- .relocation_handler = smm_relocation_handler,
- .post_mp_init = global_smi_enable,
-};
-
void mp_init_cpus(struct bus *cpu_bus)
{
- if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
+ extern const struct mp_ops amd_mp_ops_with_smm;
+ if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
die_with_post_code(POST_HW_INIT_FAILURE,
"mp_init_with_smm failed. Halting.\n");