/* SPDX-License-Identifier: GPL-2.0-only */
#include <acpi/acpi_device.h>
#include <amdblocks/data_fabric.h>
#include <console/console.h>
#include <cpu/x86/lapic_def.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <soc/data_fabric.h>
#include <soc/iomap.h>
#include <types.h>
/*
 * Mark the HPET..LAPIC MMIO window (0xfed00000 to 0xfee00000 - 1) as
 * non-posted (NP) in the data fabric's NB MMIO routing registers,
 * adjusting any pre-existing routing pairs so no enabled pair overlaps
 * the new NP region.
 */
void data_fabric_set_mmio_np(void)
{
	/*
	 * Mark region from HPET-LAPIC or 0xfed00000-0xfee00000-1 as NP.
	 *
	 * AGESA has already programmed the NB MMIO routing, however nothing
	 * is yet marked as non-posted.
	 *
	 * If there exists an overlapping routing base/limit pair, trim its
	 * base or limit to avoid the new NP region. If any pair exists
	 * completely within HPET-LAPIC range, remove it. If any pair surrounds
	 * HPET-LAPIC, it must be split into two regions.
	 *
	 * TODO(b/156296146): Remove the settings from AGESA and allow coreboot
	 * to own everything. If not practical, consider erasing all settings
	 * and have coreboot reprogram them. At that time, make the source
	 * below more flexible.
	 *   * Note that the code relies on the granularity of the HPET and
	 *     LAPIC addresses being sufficiently large that the shifted limits
	 *     +/-1 are always equivalent to the non-shifted values +/-1.
	 */
	unsigned int i;
	int reg;
	uint32_t base, limit, ctrl;
	/* NP window bounds expressed in register granularity (address >> D18F0_MMIO_SHIFT) */
	const uint32_t np_bot = HPET_BASE_ADDRESS >> D18F0_MMIO_SHIFT;
	const uint32_t np_top = (LAPIC_DEFAULT_BASE - 1) >> D18F0_MMIO_SHIFT;

	data_fabric_print_mmio_conf();

	for (i = 0; i < NUM_NB_MMIO_REGS; i++) {
		/* Adjust all registers that overlap */
		ctrl = data_fabric_broadcast_read32(0, NB_MMIO_CONTROL(i));
		if (!(ctrl & (MMIO_WE | MMIO_RE)))
			continue; /* not enabled */

		base = data_fabric_broadcast_read32(0, NB_MMIO_BASE(i));
		limit = data_fabric_broadcast_read32(0, NB_MMIO_LIMIT(i));

		if (base > np_top || limit < np_bot)
			continue; /* no overlap at all */

		if (base >= np_bot && limit <= np_top) {
			data_fabric_disable_mmio_reg(i); /* 100% within, so remove */
			continue;
		}

		if (base < np_bot && limit > np_top) {
			/* Split the configured region: keep the bottom part in
			   this pair and move the top part into a spare pair. */
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
			reg = data_fabric_find_unused_mmio_reg();
			if (reg < 0) {
				/* Although a pair could be freed later, this condition is
				 * very unusual and deserves analysis. Flag an error and
				 * leave the topmost part unconfigured. */
				printk(BIOS_ERR,
				       "Error: Not enough NB MMIO routing registers\n");
				continue;
			}
			/* Program the spare pair with the original control settings */
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_top + 1);
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), limit);
			data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg), ctrl);
			continue;
		}

		/* If still here, adjust only the base or limit */
		if (base <= np_bot)
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
		else
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(i), np_top + 1);
	}

	/* Finally claim a free pair for the NP window itself */
	reg = data_fabric_find_unused_mmio_reg();
	if (reg < 0) {
		printk(BIOS_ERR, "Error: cannot configure region as NP\n");
		return;
	}

	data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_bot);
	data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), np_top);
	data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg),
				      (IOMS0_FABRIC_ID << MMIO_DST_FABRIC_ID_SHIFT) | MMIO_NP | MMIO_WE
				      | MMIO_RE);

	data_fabric_print_mmio_conf();
}
/*
 * Return the ACPI name ("DFD0".."DFD7") for a data fabric PCI function,
 * or NULL (after logging an error) for an unhandled device ID.
 *
 * Renoir (family 17h model 60h) and Cezanne (family 19h model 51h) use
 * different PCI device IDs for the same DF function number, so each name
 * maps from a pair of IDs.
 */
static const char *data_fabric_acpi_name(const struct device *dev)
{
	static const struct {
		unsigned short renoir_id;	/* family 17h model 60h */
		unsigned short cezanne_id;	/* family 19h model 51h */
		const char *name;
	} df_map[] = {
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0, "DFD0" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1, "DFD1" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2, "DFD2" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3, "DFD3" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4, "DFD4" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5, "DFD5" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6, "DFD6" },
		{ PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7,
		  PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7, "DFD7" },
	};
	size_t i;

	for (i = 0; i < sizeof(df_map) / sizeof(df_map[0]); i++) {
		if (dev->device == df_map[i].renoir_id
		    || dev->device == df_map[i].cezanne_id)
			return df_map[i].name;
	}

	printk(BIOS_ERR, "%s: Unhandled device id 0x%x\n", __func__, dev->device);
	return NULL;
}
/* Device operations for the DF PCI functions: no resources to manage,
   only ACPI naming and SSDT generation for the PCI device nodes. */
static struct device_operations data_fabric_ops = {
	.read_resources		= noop_read_resources,
	.set_resources		= noop_set_resources,
	.acpi_name		= data_fabric_acpi_name,
	.acpi_fill_ssdt		= acpi_device_write_pci_dev,
};
/* PCI device IDs matched by this driver; zero-terminated list. */
static const unsigned short pci_device_ids[] = {
	/* Renoir DF devices */
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7,
	/* Cezanne DF devices */
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7,
	0	/* list terminator */
};
/* Register the driver for all AMD data fabric PCI functions listed above. */
static const struct pci_driver data_fabric_driver __pci_driver = {
	.ops		= &data_fabric_ops,
	.vendor		= PCI_VENDOR_ID_AMD,
	.devices	= pci_device_ids,
};
|