/* SPDX-License-Identifier: GPL-2.0-only */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define DEFAULT_CPU_D_STATE	D0
#define LPI_STATES_ALL		0xff
#define LPI_REVISION		0
#define LPI_ENABLED		1

/*
 * List of supported C-states in this processor.
 */
enum {
	C_STATE_C0,		/* 0 */
	C_STATE_C1,		/* 1 */
	C_STATE_C1E,		/* 2 */
	C_STATE_C6_SHORT_LAT,	/* 3 */
	C_STATE_C6_LONG_LAT,	/* 4 */
	C_STATE_C7_SHORT_LAT,	/* 5 */
	C_STATE_C7_LONG_LAT,	/* 6 */
	C_STATE_C7S_SHORT_LAT,	/* 7 */
	C_STATE_C7S_LONG_LAT,	/* 8 */
	C_STATE_C8,		/* 9 */
	C_STATE_C9,		/* 10 */
	C_STATE_C10,		/* 11 */
	NUM_C_STATES
};

static const acpi_cstate_t cstate_map[NUM_C_STATES] = {
	[C_STATE_C0] = {},
	[C_STATE_C1] = {
		.latency = C1_LATENCY,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 0),
	},
	[C_STATE_C1E] = {
		.latency = C1_LATENCY,
		.power = C1_POWER,
		.resource = MWAIT_RES(0, 1),
	},
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C6_LATENCY,
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 0),
	},
	[C_STATE_C6_LONG_LAT] = {
		.latency = C6_LATENCY,
		.power = C6_POWER,
		.resource = MWAIT_RES(2, 1),
	},
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 0),
	},
	[C_STATE_C7_LONG_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 1),
	},
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 2),
	},
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C7_LATENCY,
		.power = C7_POWER,
		.resource = MWAIT_RES(3, 3),
	},
	[C_STATE_C8] = {
		.latency = C8_LATENCY,
		.power = C8_POWER,
		.resource = MWAIT_RES(4, 0),
	},
	[C_STATE_C9] = {
		.latency = C9_LATENCY,
		.power = C9_POWER,
		.resource = MWAIT_RES(5, 0),
	},
	[C_STATE_C10] = {
		.latency = C10_LATENCY,
		.power = C10_POWER,
		.resource = MWAIT_RES(6, 0),
	},
};

static int cstate_set_non_s0ix[] = {
	C_STATE_C1,
	C_STATE_C6_LONG_LAT,
	C_STATE_C7S_LONG_LAT
};

static int cstate_set_s0ix[] = {
	C_STATE_C1,
	C_STATE_C6_LONG_LAT,
	C_STATE_C10
};

enum dev_sleep_states {
	D0,	/* 0 */
	D1,	/* 1 */
	D2,	/* 2 */
	D3,	/* 3 */
	NONE
};

const acpi_cstate_t *soc_get_cstate_map(size_t *entries)
{
	static acpi_cstate_t map[MAX(ARRAY_SIZE(cstate_set_s0ix),
				     ARRAY_SIZE(cstate_set_non_s0ix))];
	int *set;
	int i;

	config_t *config = config_of_soc();
	int is_s0ix_enable = config->s0ix_enable;

	if (is_s0ix_enable) {
		*entries = ARRAY_SIZE(cstate_set_s0ix);
		set = cstate_set_s0ix;
	} else {
		*entries = ARRAY_SIZE(cstate_set_non_s0ix);
		set = cstate_set_non_s0ix;
	}

	for (i = 0; i < *entries; i++) {
		map[i] = cstate_map[set[i]];
		map[i].ctype = i + 1;
	}
	return map;
}

void soc_power_states_generation(int core_id, int cores_per_package)
{
	config_t *config = config_of_soc();

	if (config->eist_enable)
		/* Generate P-state tables */
		generate_p_state_entries(core_id, cores_per_package);
}

void soc_fill_fadt(acpi_fadt_t *fadt)
{
	const uint16_t pmbase = ACPI_BASE_ADDRESS;
	config_t *config = config_of_soc();

	fadt->pm_tmr_blk = pmbase + PM1_TMR;
	fadt->pm_tmr_len = 4;

	fadt->x_pm_tmr_blk.space_id = ACPI_ADDRESS_SPACE_IO;
	fadt->x_pm_tmr_blk.bit_width = fadt->pm_tmr_len * 8;
	fadt->x_pm_tmr_blk.bit_offset = 0;
	fadt->x_pm_tmr_blk.access_size = ACPI_ACCESS_SIZE_DWORD_ACCESS;
	fadt->x_pm_tmr_blk.addrl = fadt->pm_tmr_blk;
	fadt->x_pm_tmr_blk.addrh = 0x0;

	if (config->s0ix_enable)
		fadt->flags |= ACPI_FADT_LOW_PWR_IDLE_S0;
}
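/*
 * Minimum sleep state (LPI constraint) for each PCI device, keyed by devfn.
 * Devices that are disabled, sit on an external bus, or are not listed here
 * are skipped by get_min_sleep_state() and therefore left out of the LPI
 * constraint package generated below.
 */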
static const struct {
	uint8_t pci_dev;
	enum dev_sleep_states min_sleep_state;
} min_pci_sleep_states[] = {
	{ SA_DEVFN_ROOT, D3 },
	{ SA_DEVFN_CPU_PCIE1_0, D3 },
	{ SA_DEVFN_IGD, D3 },
	{ SA_DEVFN_DPTF, D3 },
	{ SA_DEVFN_IPU, D3 },
	{ SA_DEVFN_CPU_PCIE6_0, D3 },
	{ SA_DEVFN_CPU_PCIE6_2, D3 },
	{ SA_DEVFN_TBT0, D3 },
	{ SA_DEVFN_TBT1, D3 },
	{ SA_DEVFN_TBT2, D3 },
	{ SA_DEVFN_TBT3, D3 },
	{ SA_DEVFN_GNA, D3 },
	{ SA_DEVFN_TCSS_XHCI, D3 },
	{ SA_DEVFN_TCSS_XDCI, D3 },
	{ SA_DEVFN_TCSS_DMA0, D3 },
	{ SA_DEVFN_TCSS_DMA1, D3 },
	{ SA_DEVFN_VMD, D3 },
	{ PCH_DEVFN_I2C6, D3 },
	{ PCH_DEVFN_I2C7, D3 },
	{ PCH_DEVFN_THC0, D3 },
	{ PCH_DEVFN_THC1, D3 },
	{ PCH_DEVFN_XHCI, D3 },
	{ PCH_DEVFN_USBOTG, D3 },
	{ PCH_DEVFN_SRAM, D3 },
	{ PCH_DEVFN_CNVI_WIFI, D3 },
	{ PCH_DEVFN_I2C0, D3 },
	{ PCH_DEVFN_I2C1, D3 },
	{ PCH_DEVFN_I2C2, D3 },
	{ PCH_DEVFN_I2C3, D3 },
	{ PCH_DEVFN_CSE, D0 },
	{ PCH_DEVFN_SATA, D3 },
	{ PCH_DEVFN_I2C4, D3 },
	{ PCH_DEVFN_I2C5, D3 },
	{ PCH_DEVFN_UART2, D3 },
	{ PCH_DEVFN_PCIE1, D0 },
	{ PCH_DEVFN_PCIE2, D0 },
	{ PCH_DEVFN_PCIE3, D0 },
	{ PCH_DEVFN_PCIE4, D0 },
	{ PCH_DEVFN_PCIE5, D0 },
	{ PCH_DEVFN_PCIE6, D0 },
	{ PCH_DEVFN_PCIE7, D0 },
	{ PCH_DEVFN_PCIE8, D0 },
	{ PCH_DEVFN_PCIE9, D0 },
	{ PCH_DEVFN_PCIE10, D0 },
	{ PCH_DEVFN_PCIE11, D0 },
	{ PCH_DEVFN_PCIE12, D0 },
	{ PCH_DEVFN_UART0, D3 },
	{ PCH_DEVFN_UART1, D3 },
	{ PCH_DEVFN_GSPI0, D3 },
	{ PCH_DEVFN_GSPI1, D3 },
	{ PCH_DEVFN_ESPI, D0 },
	{ PCH_DEVFN_PMC, D0 },
	{ PCH_DEVFN_HDA, D0 },
	{ PCH_DEVFN_SPI, D3 },
	{ PCH_DEVFN_GBE, D3 },
};

static enum dev_sleep_states get_min_sleep_state(const struct device *dev)
{
	if (!is_dev_enabled(dev))
		return NONE;

	switch (dev->path.type) {
	case DEVICE_PATH_APIC:
		return DEFAULT_CPU_D_STATE;

	case DEVICE_PATH_PCI:
		/* skip external buses */
		if (dev->bus->secondary != 0)
			return NONE;
		for (size_t i = 0; i < ARRAY_SIZE(min_pci_sleep_states); i++)
			if (min_pci_sleep_states[i].pci_dev == dev->path.pci.devfn)
				return min_pci_sleep_states[i].min_sleep_state;
		printk(BIOS_WARNING, "Unknown min d_state for %x\n", dev->path.pci.devfn);
		return NONE;

	default:
		return NONE;
	}
}
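/*
 * For reference, the package emitted below has roughly the following ASL
 * shape (sketch only; the number of entries depends on the board
 * configuration, and "\_SB.PCI0.SATA" is just an illustrative device path):
 *
 *	Return (Package() {
 *		Package() { \_SB.PCI0.SATA, 1, Package() { 0, Package() { 0xFF, 3 } } },
 *		...
 *	})
 *
 * i.e. one three-element package per constrained device: the device path, the
 * enabled flag (LPI_ENABLED), and a constraint package holding the revision
 * (LPI_REVISION) plus a nested package with the state mask (LPI_STATES_ALL)
 * and the minimum D-state taken from min_pci_sleep_states[].
 */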
/* Generate the LPI constraint table and report the number of devices included */
void soc_lpi_get_constraints(void *unused)
{
	unsigned int num_entries;
	const struct device *dev;
	enum dev_sleep_states min_sleep_state;

	num_entries = 0;

	for (dev = all_devices; dev; dev = dev->next) {
		if (get_min_sleep_state(dev) != NONE)
			num_entries++;
	}

	acpigen_emit_byte(RETURN_OP);
	acpigen_write_package(num_entries);

	size_t cpu_index = 0;
	for (dev = all_devices; dev; dev = dev->next) {
		min_sleep_state = get_min_sleep_state(dev);
		if (min_sleep_state == NONE)
			continue;

		acpigen_write_package(3);
		{
			char path[32] = { 0 };

			/* Emit the device path */
			switch (dev->path.type) {
			case DEVICE_PATH_PCI:
				acpigen_emit_namestring(acpi_device_path(dev));
				break;

			case DEVICE_PATH_APIC:
				snprintf(path, sizeof(path), CONFIG_ACPI_CPU_STRING,
					 cpu_index++);
				acpigen_emit_namestring(path);
				break;

			default:
				/* Unhandled */
				printk(BIOS_WARNING, "Unhandled device path type %d\n",
				       dev->path.type);
				acpigen_emit_namestring(NULL);
				break;
			}

			acpigen_write_integer(LPI_ENABLED);
			acpigen_write_package(2);
			{
				acpigen_write_integer(LPI_REVISION);
				acpigen_write_package(2); /* no optional device info */
				{
					/* Assume constraints apply to all entries */
					acpigen_write_integer(LPI_STATES_ALL);
					acpigen_write_integer(min_sleep_state); /* min D-state */
				}
				acpigen_write_package_end();
			}
			acpigen_write_package_end();
		}
		acpigen_write_package_end();
	}

	acpigen_write_package_end();

	printk(BIOS_INFO, "Returning SoC specific constraint package for %d devices\n",
	       num_entries);
}

uint32_t soc_read_sci_irq_select(void)
{
	return read32p(soc_read_pmc_base() + IRQ_REG);
}

/*
 * Emit DRHD entries for the enabled VT-d engines (graphics, IPU and the TCSS
 * Thunderbolt root ports) plus the catch-all engine at VTVC0BAR, and an RMRR
 * covering the graphics stolen memory for the IGD.
 */
static unsigned long soc_fill_dmar(unsigned long current)
{
	const uint64_t gfxvtbar = MCHBAR64(GFXVTBAR) & VTBAR_MASK;
	const bool gfxvten = MCHBAR32(GFXVTBAR) & VTBAR_ENABLED;

	if (is_devfn_enabled(SA_DEVFN_IGD) && gfxvtbar && gfxvten) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, 0, 0, gfxvtbar);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IGD, 0);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	const uint64_t ipuvtbar = MCHBAR64(IPUVTBAR) & VTBAR_MASK;
	const bool ipuvten = MCHBAR32(IPUVTBAR) & VTBAR_ENABLED;

	if (is_devfn_enabled(SA_DEVFN_IPU) && ipuvtbar && ipuvten) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, 0, 0, ipuvtbar);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IPU, 0);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	/* TCSS Thunderbolt root ports */
	for (unsigned int i = 0; i < MAX_TBT_PCIE_PORT; i++) {
		if (is_devfn_enabled(SA_DEVFN_TBT(i))) {
			const uint64_t tbtbar = MCHBAR64(TBTxBAR(i)) & VTBAR_MASK;
			const bool tbten = MCHBAR32(TBTxBAR(i)) & VTBAR_ENABLED;

			if (tbtbar && tbten) {
				const unsigned long tmp = current;

				current += acpi_create_dmar_drhd(current, 0, 0, tbtbar);
				current += acpi_create_dmar_ds_pci_br(current, 0,
								      SA_DEV_SLOT_TBT, i);

				acpi_dmar_drhd_fixup(tmp, current);
			}
		}
	}

	const uint64_t vtvc0bar = MCHBAR64(VTVC0BAR) & VTBAR_MASK;
	const bool vtvc0en = MCHBAR32(VTVC0BAR) & VTBAR_ENABLED;

	if (vtvc0bar && vtvc0en) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_drhd(current, DRHD_INCLUDE_PCI_ALL, 0, vtvc0bar);
		current += acpi_create_dmar_ds_ioapic_from_hw(current, IO_APIC_ADDR,
							      V_P2SB_CFG_IBDF_BUS,
							      V_P2SB_CFG_IBDF_DEV,
							      V_P2SB_CFG_IBDF_FUNC);
		current += acpi_create_dmar_ds_msi_hpet(current, 0, V_P2SB_CFG_HBDF_BUS,
							V_P2SB_CFG_HBDF_DEV,
							V_P2SB_CFG_HBDF_FUNC);

		acpi_dmar_drhd_fixup(tmp, current);
	}

	/* Add RMRR entry */
	if (is_devfn_enabled(SA_DEVFN_IGD)) {
		const unsigned long tmp = current;

		current += acpi_create_dmar_rmrr(current, 0, sa_get_gsm_base(),
						 sa_get_tolud_base() - 1);
		current += acpi_create_dmar_ds_pci(current, 0, SA_DEV_SLOT_IGD, 0);
		acpi_dmar_rmrr_fixup(tmp, current);
	}

	return current;
}

unsigned long sa_write_acpi_tables(const struct device *dev, unsigned long current,
				   struct acpi_rsdp *rsdp)
{
	acpi_dmar_t *const dmar = (acpi_dmar_t *)current;

	/*
	 * Create DMAR table only if we have VT-d capability and FSP does not override its
	 * feature.
	 */
	if ((pci_read_config32(dev, CAPID0_A) & VTD_DISABLE) ||
	    !(MCHBAR32(VTVC0BAR) & VTBAR_ENABLED))
		return current;

	printk(BIOS_DEBUG, "ACPI: * DMAR\n");
	acpi_create_dmar(dmar, DMAR_INTR_REMAP | DMA_CTRL_PLATFORM_OPT_IN_FLAG, soc_fill_dmar);
	current += dmar->header.length;
	current = acpi_align_current(current);
	acpi_add_table(rsdp, dmar);

	return current;
}

void soc_fill_gnvs(struct global_nvs *gnvs)
{
	config_t *config = config_of_soc();

	/* Enable DPTF based on mainboard configuration */
	gnvs->dpte = config->dptf_enable;

	/* Set USB2/USB3 wake enable bitmaps. */
	gnvs->u2we = config->usb2_wake_enable_bitmap;
	gnvs->u3we = config->usb3_wake_enable_bitmap;
}

int soc_madt_sci_irq_polarity(int sci)
{
	return MP_IRQ_POLARITY_HIGH;
}