/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/northbridge.h>
#include <soc/pci_devs.h>
#include <soc/smi.h>
#include <types.h>

/*
 * MP and SMM loading initialization.
 */

/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
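
	/*
	 * SYSCFG_MSR_TOM2WB (Tom2ForceMemTypeWB) forces DRAM between 4GB and
	 * TOM2 to write-back regardless of the MTRRs, so no variable MTRRs
	 * are needed above 4GB when it is set.
	 */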
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
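
/* CPUID Fn8000_0008 ECX[7:0] (NC) holds the core count minus one. */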
static int get_cpu_count(void)
{
	return 1 + (cpuid_ecx(0x80000008) & 0xff);
}
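
/*
 * MP init callbacks: pre_mp_init() runs on the BSP before the APs start,
 * get_smm_info()/smm_relocation_handler() are the common amdblocks SMM
 * helpers used to size and relocate SMM for each core, and post_mp_init()
 * (global_smi_enable()) runs once every core has been brought up.
 */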
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");

	/* The flash is now no longer cacheable. Reset to WP for performance. */
	mtrr_use_temp_range(FLASH_BASE_ADDR, CONFIG_ROM_SIZE, MTRR_TYPE_WRPROT);
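
	/*
	 * Record that any reset from here on is a warm reset, so post-reset
	 * code can tell it apart from a cold boot.
	 */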
	set_warm_reset_flag();
}

static void model_15_init(struct device *dev)
{
	check_mca();

	/*
	 * Per AMD, sync an undocumented MSR with the PSP base address.
	 * Experiments showed that if you write to the MSR after it has
	 * been previously programmed, it causes a general protection fault.
	 * Also, the MSR survives warm reset and S3 cycles, so we need to
	 * test if it was previously written before writing to it.
	 */
	msr_t psp_msr;
	uint32_t psp_bar; /* Note: NDA BKDG names this 32-bit register BAR3 */

	psp_bar = pci_read_config32(SOC_PSP_DEV, PCI_BASE_ADDRESS_4);
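	/* Keep only the base address; mask off the BAR's memory attribute bits. */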
	psp_bar &= ~PCI_BASE_ADDRESS_MEM_ATTR_MASK;

	psp_msr = rdmsr(PSP_ADDR_MSR);
	if (psp_msr.lo == 0) {
		psp_msr.lo = psp_bar;
		wrmsr(PSP_ADDR_MSR, psp_msr);
	}
}

static struct device_operations cpu_dev_ops = {
	.init = model_15_init,
};
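
/*
 * Supported family 15h CPUIDs: 0x660f01 is family 15h model 60h and
 * 0x670f00 is family 15h model 70h (Stoney Ridge); the model 60h entry
 * presumably covers the closely related Bristol Ridge/Carrizo parts.
 */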
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, 0x660f01 },
	{ X86_VENDOR_AMD, 0x670f00 },
	{ 0, 0 },
};
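
/*
 * Registering the driver in the __cpu_driver linker table lets coreboot's CPU
 * enumeration match the IDs above and call model_15_init() on each core.
 */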
static const struct cpu_driver model_15 __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};