author    Felix Held <felix-coreboot@felixheld.de>    2023-04-28 22:47:33 +0200
committer Felix Held <felix-coreboot@felixheld.de>    2023-06-07 00:12:35 +0000
commit    7a5dd781d147d1f119290f258f6897fffba417dd (patch)
tree      3d381a23979b3e26ce0fb876ed31dc955df4a5dd /src/soc
parent    e3c9a04f8b398696394fe98b054f0f5bf5523425 (diff)
soc/amd/common/data_fabric/domain: provide amd_pci_domain_fill_ssdt
Generate the PCI0 _CRS ACPI resource template to tell the OS which PCI bus
numbers and which IO and MMIO regions can be used for PCI devices below
_SB/PCI0. This data corresponds to what amd_pci_domain_scan_bus and
amd_pci_domain_read_resources provided to the resource allocator, so the PCI0
_CRS ACPI resource template matches the constraints the resource allocator
used when allocating resources.

TEST=With the rest of the current patch train also applied, the generated
_CRS resource template contains the expected PCI bus numbers and IO and MMIO
resources, and both Linux and Windows boot on Mandolin.

Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Signed-off-by: Felix Held <felix-coreboot@felixheld.de>
Change-Id: Iaf6d38a8ef5bb0163c4d1c021bf892c323d9a448
Reviewed-on: https://review.coreboot.org/c/coreboot/+/74843
Reviewed-by: Eric Lai <eric_lai@quanta.corp-partner.google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
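For illustration only, a minimal sketch (not part of this patch; the window values are made up) of the acpigen call sequence that produces such a _CRS resource template for a PCI domain, using the same helpers that appear in the diff below:

#include <acpi/acpigen.h>

/* Illustrative sketch with example window values; the actual code in the diff
   below derives the windows from the domain's resource list instead. */
void example_fill_pci0_crs(void)
{
	acpigen_write_scope("\\_SB.PCI0");
	acpigen_write_name("_CRS");
	acpigen_write_resourcetemplate_header();
	/* PCI bus numbers available for enumeration below this host bridge */
	acpigen_resource_producer_bus_number(0x00, 0x3f);
	/* IO window the resource allocator handed to this domain */
	acpigen_resource_producer_io(0x1000, 0xffff);
	/* Non-cacheable MMIO window for PCI BARs */
	acpigen_resource_producer_mmio(0xc0000000, 0xfebfffff,
				       MEM_RSRC_FLAG_MEM_READ_WRITE |
				       MEM_RSRC_FLAG_MEM_ATTR_NON_CACHE);
	acpigen_write_resourcetemplate_footer();
	acpigen_pop_len(); /* Scope */
}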
Diffstat (limited to 'src/soc')
-rw-r--r--  src/soc/amd/common/block/data_fabric/domain.c              | 88
-rw-r--r--  src/soc/amd/common/block/include/amdblocks/data_fabric.h   |  2
2 files changed, 90 insertions, 0 deletions
diff --git a/src/soc/amd/common/block/data_fabric/domain.c b/src/soc/amd/common/block/data_fabric/domain.c
index 9b9ccc4012..aec1596fc1 100644
--- a/src/soc/amd/common/block/data_fabric/domain.c
+++ b/src/soc/amd/common/block/data_fabric/domain.c
@@ -1,8 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <acpi/acpigen.h>
#include <amdblocks/cpu.h>
#include <amdblocks/data_fabric.h>
#include <arch/ioapic.h>
+#include <arch/vga.h>
#include <console/console.h>
#include <cpu/amd/mtrr.h>
#include <device/device.h>
@@ -158,3 +160,89 @@ void amd_pci_domain_read_resources(struct device *domain)
add_data_fabric_mmio_regions(domain, &idx);
}
+
+static void write_ssdt_domain_io_producer_range_helper(const char *domain_name,
+ resource_t base, resource_t limit)
+{
+ printk(BIOS_DEBUG, "%s _CRS: adding IO range [%llx-%llx]\n", domain_name, base, limit);
+ acpigen_resource_producer_io(base, limit);
+}
+
+static void write_ssdt_domain_io_producer_range(const char *domain_name,
+ resource_t base, resource_t limit)
+{
+ /*
+ * Split the IO region at the PCI config IO ports so that the IO resource producer
+ * won't cover the same IO ports that the IO resource consumer for the PCI config IO
+ * ports in the same ACPI device already covers.
+ */
+ if (base < PCI_IO_CONFIG_INDEX) {
+ write_ssdt_domain_io_producer_range_helper(domain_name,
+ base,
+ MIN(limit, PCI_IO_CONFIG_INDEX - 1));
+ }
+ if (limit > PCI_IO_CONFIG_LAST_PORT) {
+ write_ssdt_domain_io_producer_range_helper(domain_name,
+ MAX(base, PCI_IO_CONFIG_LAST_PORT + 1),
+ limit);
+ }
+}
+
+static void write_ssdt_domain_mmio_producer_range(const char *domain_name,
+ resource_t base, resource_t limit)
+{
+ printk(BIOS_DEBUG, "%s _CRS: adding MMIO range [%llx-%llx]\n",
+ domain_name, base, limit);
+ acpigen_resource_producer_mmio(base, limit,
+ MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_NON_CACHE);
+}
+
+void amd_pci_domain_fill_ssdt(const struct device *domain)
+{
+ const char *acpi_scope = acpi_device_path(domain);
+ printk(BIOS_DEBUG, "%s ACPI scope: '%s'\n", __func__, acpi_scope);
+ acpigen_write_scope(acpi_device_path(domain));
+
+ acpigen_write_name("_CRS");
+ acpigen_write_resourcetemplate_header();
+
+ /* PCI bus number range in domain */
+ printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x]\n", acpi_device_name(domain),
+ domain->link_list->secondary, domain->link_list->subordinate);
+ acpigen_resource_producer_bus_number(domain->link_list->secondary,
+ domain->link_list->subordinate);
+
+ if (domain->link_list->secondary == 0) {
+ /* ACPI 6.4.2.5 I/O Port Descriptor */
+ acpigen_write_io16(PCI_IO_CONFIG_INDEX, PCI_IO_CONFIG_LAST_PORT, 1,
+ PCI_IO_CONFIG_PORT_COUNT, 1);
+ }
+
+ struct resource *res;
+ for (res = domain->resource_list; res != NULL; res = res->next) {
+ if (!(res->flags & IORESOURCE_ASSIGNED))
+ continue;
+ switch (res->flags & IORESOURCE_TYPE_MASK) {
+ case IORESOURCE_IO:
+ write_ssdt_domain_io_producer_range(acpi_device_name(domain),
+ res->base, res->limit);
+ break;
+ case IORESOURCE_MEM:
+ write_ssdt_domain_mmio_producer_range(acpi_device_name(domain),
+ res->base, res->limit);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (domain->link_list->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
+ printk(BIOS_DEBUG, "%s _CRS: adding VGA resource\n", acpi_device_name(domain));
+ acpigen_resource_producer_mmio(VGA_MMIO_BASE, VGA_MMIO_LIMIT,
+ MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_CACHE);
+ }
+
+ acpigen_write_resourcetemplate_footer();
+ /* Scope */
+ acpigen_pop_len();
+}
diff --git a/src/soc/amd/common/block/include/amdblocks/data_fabric.h b/src/soc/amd/common/block/include/amdblocks/data_fabric.h
index c021f50c2c..0ed3272342 100644
--- a/src/soc/amd/common/block/include/amdblocks/data_fabric.h
+++ b/src/soc/amd/common/block/include/amdblocks/data_fabric.h
@@ -47,4 +47,6 @@ void data_fabric_set_mmio_np(void);
void amd_pci_domain_read_resources(struct device *domain);
void amd_pci_domain_scan_bus(struct device *domain);
+void amd_pci_domain_fill_ssdt(const struct device *domain);
+
#endif /* AMD_BLOCK_DATA_FABRIC_H */
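As a usage sketch (not part of this change): a SoC can hook the new callback into its PCI domain device_operations roughly as follows. The ops variable name is made up here; the other callbacks are existing coreboot/amdblocks functions.

#include <amdblocks/data_fabric.h>
#include <device/device.h>
#include <device/pci.h>

/* Hypothetical example: wire up the new SSDT callback for a PCI domain. */
struct device_operations example_amd_pci_domain_ops = {
	.read_resources	= amd_pci_domain_read_resources,
	.set_resources	= pci_domain_set_resources,
	.scan_bus	= amd_pci_domain_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_fill_ssdt	= amd_pci_domain_fill_ssdt,
#endif
};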