path: root/src/soc/intel
author     Shuo Liu <shuo.liu@intel.com>              2024-03-27 04:26:16 +0800
committer  Felix Held <felix-coreboot@felixheld.de>   2024-05-06 17:34:07 +0000
commit     9580e7fba87f3e933c2fc1a654ecfcde9c8376f1 (patch)
tree       8f01f8de9764cb1eb558e8c617f209e3a5158942 /src/soc/intel
parent     49e5d3dc262c91a4fbf9c05e97c19ded1098247f (diff)
soc/intel/xeon_sp: Add fill_pd_distances
Add a simple algorithm to cover basic cases of proximity domain distance
handling. At the same time, the local variable usage of fill_pds() is
optimized.

TEST=Build and boot on intel/archercity CRB. ACPI SRAT, SLIT and DMAR
(Remapping Hardware Static Affinity) are generated correctly for a 2S
system.

Change-Id: I2b666dc2a140d1bb1fdff9bc7b835d5cf5b4bbc5
Signed-off-by: Shuo Liu <shuo.liu@intel.com>
Co-authored-by: Ziang Wang <ziang.wang@intel.com>
Co-authored-by: Gang Chen <gang.c.chen@intel.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/81442
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
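
For readers skimming the patch, here is a minimal standalone sketch of the
distance rule that fill_pd_distances() introduces. The pd_info struct, the
pd_distance() helper and the example domain layout below are illustrative
inventions for this note, not coreboot code; only the distance constants
mirror the ones added by the patch. It assumes a hypothetical two-socket
system with one CXL generic initiator domain per socket.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the xeon_sp proximity domain data. */
enum pd_type { PD_TYPE_PROCESSOR, PD_TYPE_GENERIC_INITIATOR };

struct pd_info {
	enum pd_type pd_type;
	uint8_t socket_bitmap;
};

#define PD_DISTANCE_SELF		0x0A
#define PD_DISTANCE_SAME_SOCKET		0x0C
#define PD_DISTANCE_CROSS_SOCKET	0x14
#define PD_DISTANCE_IO_EXTRA		0x01

/* Same rule as fill_pd_distances(): SLIT "self" distance on the diagonal,
   a shorter hop within one socket, a longer hop across sockets, plus a
   small penalty for each generic initiator (CXL) endpoint involved. */
static uint8_t pd_distance(const struct pd_info *a, const struct pd_info *b)
{
	uint8_t d;

	if (a == b)
		return PD_DISTANCE_SELF;

	d = (a->socket_bitmap == b->socket_bitmap) ?
		PD_DISTANCE_SAME_SOCKET : PD_DISTANCE_CROSS_SOCKET;

	if (a->pd_type == PD_TYPE_GENERIC_INITIATOR)
		d += PD_DISTANCE_IO_EXTRA;
	if (b->pd_type == PD_TYPE_GENERIC_INITIATOR)
		d += PD_DISTANCE_IO_EXTRA;

	return d;
}

int main(void)
{
	/* Two processor domains plus one CXL domain per socket. */
	const struct pd_info pds[] = {
		{ PD_TYPE_PROCESSOR,         0x01 },
		{ PD_TYPE_PROCESSOR,         0x02 },
		{ PD_TYPE_GENERIC_INITIATOR, 0x01 },
		{ PD_TYPE_GENERIC_INITIATOR, 0x02 },
	};
	const int n = sizeof(pds) / sizeof(pds[0]);

	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++)
			printf(" %02x", pd_distance(&pds[i], &pds[j]));
		printf("\n");
	}
	/* Prints the resulting SLIT-style matrix:
	 *  0a 14 0d 15
	 *  14 0a 15 0d
	 *  0d 15 0a 16
	 *  15 0d 16 0a
	 */
	return 0;
}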
Diffstat (limited to 'src/soc/intel')
-rw-r--r--   src/soc/intel/xeon_sp/include/soc/numa.h |  1
-rw-r--r--   src/soc/intel/xeon_sp/numa.c             | 53
-rw-r--r--   src/soc/intel/xeon_sp/uncore.c           |  1
3 files changed, 36 insertions, 19 deletions
diff --git a/src/soc/intel/xeon_sp/include/soc/numa.h b/src/soc/intel/xeon_sp/include/soc/numa.h
index 3e495e28c8..7bc86ae52c 100644
--- a/src/soc/intel/xeon_sp/include/soc/numa.h
+++ b/src/soc/intel/xeon_sp/include/soc/numa.h
@@ -59,6 +59,7 @@ extern struct proximity_domains pds;
void dump_pds(void);
void fill_pds(void);
+void fill_pd_distances(void);
/*
* Return the total size of memory regions in generic initiator affinity
diff --git a/src/soc/intel/xeon_sp/numa.c b/src/soc/intel/xeon_sp/numa.c
index 1f19b6e7bc..69ca0637b1 100644
--- a/src/soc/intel/xeon_sp/numa.c
+++ b/src/soc/intel/xeon_sp/numa.c
@@ -50,8 +50,8 @@ void fill_pds(void)
memset(pds.pds, 0, sizeof(struct proximity_domain) * pds.num_pds);
/* Fill in processor domains */
- uint8_t i, j, socket;
- for (socket = 0, i = 0; i < num_sockets; socket++) {
+ uint8_t i = 0;
+ for (uint8_t socket = 0; socket < num_sockets; socket++) {
if (!soc_cpu_is_enabled(socket))
continue;
pds.pds[i].pd_type = PD_TYPE_PROCESSOR;
@@ -59,13 +59,6 @@ void fill_pds(void)
pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
if (!pds.pds[i].distances)
die("%s %d out of memory.", __FILE__, __LINE__);
- /* hard code the distances for now, till we know how to calculate them. */
- for (j = 0; j < pds.num_pds; j++) {
- if (j == i)
- pds.pds[i].distances[j] = 0x0a;
- else
- pds.pds[i].distances[j] = 0x0e;
- }
i++;
}
@@ -75,10 +68,9 @@ void fill_pds(void)
#if CONFIG(SOC_INTEL_HAS_CXL)
/* There are CXL nodes, fill in generic initiator domain after the processors pds */
- uint8_t skt_id, cxl_id;
const CXL_NODE_SOCKET *cxl_hob = get_cxl_node();
- for (skt_id = 0, i = num_sockets; skt_id < MAX_SOCKET; skt_id++, i++) {
- for (cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id) {
+ for (uint8_t skt_id = 0; skt_id < MAX_SOCKET; skt_id++) {
+ for (uint8_t cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id) {
const CXL_NODE_INFO node = cxl_hob[skt_id].CxlNodeInfo[cxl_id];
pds.pds[i].pd_type = PD_TYPE_GENERIC_INITIATOR;
pds.pds[i].socket_bitmap = node.SocketBitmap;
@@ -89,13 +81,7 @@ void fill_pds(void)
pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
if (!pds.pds[i].distances)
die("%s %d out of memory.", __FILE__, __LINE__);
- /* hard code the distances until we know how to calculate them */
- for (j = 0; j < pds.num_pds; j++) {
- if (j == i)
- pds.pds[i].distances[j] = 0x0a;
- else
- pds.pds[i].distances[j] = 0x0e;
- }
+ i++;
}
}
#endif
@@ -158,3 +144,32 @@ uint32_t memory_to_pd(const struct SystemMemoryMapElement *mem)
{
return socket_to_pd(mem->SocketId);
}
+
+#define PD_DISTANCE_SELF 0x0A
+#define PD_DISTANCE_SAME_SOCKET 0x0C
+#define PD_DISTANCE_CROSS_SOCKET 0x14
+#define PD_DISTANCE_MAX 0xFF
+#define PD_DISTANCE_IO_EXTRA 0x01
+
+void fill_pd_distances(void)
+{
+ for (int i = 0; i < pds.num_pds; i++) {
+ for (int j = 0; j < pds.num_pds; j++) {
+ if (i == j) {
+ pds.pds[i].distances[j] = PD_DISTANCE_SELF;
+ continue;
+ }
+
+ if (pds.pds[i].socket_bitmap == pds.pds[j].socket_bitmap)
+ pds.pds[i].distances[j] = PD_DISTANCE_SAME_SOCKET;
+ else
+ pds.pds[i].distances[j] = PD_DISTANCE_CROSS_SOCKET;
+
+ if (pds.pds[i].pd_type == PD_TYPE_GENERIC_INITIATOR)
+ pds.pds[i].distances[j] += PD_DISTANCE_IO_EXTRA;
+
+ if (pds.pds[j].pd_type == PD_TYPE_GENERIC_INITIATOR)
+ pds.pds[i].distances[j] += PD_DISTANCE_IO_EXTRA;
+ }
+ }
+}
diff --git a/src/soc/intel/xeon_sp/uncore.c b/src/soc/intel/xeon_sp/uncore.c
index 336bd35a69..efa61b7c16 100644
--- a/src/soc/intel/xeon_sp/uncore.c
+++ b/src/soc/intel/xeon_sp/uncore.c
@@ -340,6 +340,7 @@ static void mmapvtd_read_resources(struct device *dev)
if (!once) {
/* Construct NUMA data structure. This is needed for CXL. */
fill_pds();
+ fill_pd_distances();
dump_pds();
once = true;
}