-rw-r--r--   src/northbridge/intel/haswell/Makefile.inc                        1
-rw-r--r--   src/northbridge/intel/haswell/early_dmi.c                         96
-rw-r--r--   src/northbridge/intel/haswell/early_pcie.c                        121
-rw-r--r--   src/northbridge/intel/haswell/haswell.h                           3
-rw-r--r--   src/northbridge/intel/haswell/native_raminit/raminit_native.c     15
-rw-r--r--   src/northbridge/intel/haswell/vcu_mailbox.c                       155
-rw-r--r--   src/northbridge/intel/haswell/vcu_mailbox.h                       16
-rw-r--r--   src/southbridge/intel/lynxpoint/Makefile.inc                      2
-rw-r--r--   src/southbridge/intel/lynxpoint/early_pch_native.c                52
-rw-r--r--   src/southbridge/intel/lynxpoint/pch.h                             20
10 files changed, 480 insertions(+), 1 deletion(-)
diff --git a/src/northbridge/intel/haswell/Makefile.inc b/src/northbridge/intel/haswell/Makefile.inc
index 329f1f7ffe..df0b097296 100644
--- a/src/northbridge/intel/haswell/Makefile.inc
+++ b/src/northbridge/intel/haswell/Makefile.inc
@@ -20,6 +20,7 @@ romstage-y += report_platform.c
postcar-y += memmap.c
ifeq ($(CONFIG_USE_NATIVE_RAMINIT),y)
+romstage-y += early_dmi.c early_pcie.c vcu_mailbox.c
subdirs-y += native_raminit
else
diff --git a/src/northbridge/intel/haswell/early_dmi.c b/src/northbridge/intel/haswell/early_dmi.c
new file mode 100644
index 0000000000..9941242fd5
--- /dev/null
+++ b/src/northbridge/intel/haswell/early_dmi.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <console/console.h>
+#include <northbridge/intel/haswell/haswell.h>
+#include <southbridge/intel/lynxpoint/pch.h>
+#include <types.h>
+
+static void dmi_print_link_status(int loglevel)
+{
+ const uint16_t dmilsts = dmibar_read16(DMILSTS);
+ printk(loglevel, "DMI: Running at Gen%u x%u\n", dmilsts & 0xf, dmilsts >> 4 & 0x1f);
+}
+
+#define RETRAIN (1 << 5)
+
+#define LTRN (1 << 11)
+
+static void dmi_setup_physical_layer(void)
+{
+ /* Program DMI AFE settings, which are needed for DMI to work */
+ peg_dmi_recipe(false, 0);
+
+ /* Additional DMI programming steps */
+ dmibar_setbits32(0x258, 1 << 29);
+ dmibar_clrsetbits32(0x208, 0x7ff, 0x6b5);
+ dmibar_clrsetbits32(0x22c, 0xffff, 0x2020);
+
+ /* Write SA reference code version */
+ dmibar_write32(0x71c, 0x0000000f);
+ dmibar_write32(0x720, 0x01060200);
+
+ /* We also have to bring up the PCH side of the DMI link */
+ pch_dmi_setup_physical_layer();
+
+ /* Write-once settings */
+ dmibar_clrsetbits32(DMILCAP, 0x3f00f, 2 << 0);
+
+ printk(BIOS_DEBUG, "Retraining DMI at Gen2 speeds...\n");
+ dmi_print_link_status(BIOS_DEBUG);
+
+ /* Retrain link */
+ dmibar_setbits16(DMILCTL, RETRAIN);
+ do {} while (dmibar_read16(DMILSTS) & LTRN);
+ dmi_print_link_status(BIOS_DEBUG);
+
+ /* Retrain link again for DMI Gen2 speeds */
+ dmibar_setbits16(DMILCTL, RETRAIN);
+ do {} while (dmibar_read16(DMILSTS) & LTRN);
+ dmi_print_link_status(BIOS_INFO);
+}
+
+#define VC_ACTIVE (1U << 31)
+
+#define VCNEGPND (1 << 1)
+
+#define DMI_VC_CFG(vcid, tcmap) (VC_ACTIVE | ((vcid) << 24) | (tcmap))
+
+static void dmi_tc_vc_mapping(void)
+{
+ printk(BIOS_DEBUG, "Programming SA DMI VC/TC mappings...\n");
+
+ if (CONFIG(INTEL_LYNXPOINT_LP))
+ dmibar_setbits8(0xa78, 1 << 1);
+
+ /* Each TC is mapped to one and only one VC */
+ const u32 vc0 = DMI_VC_CFG(0, (1 << 6) | (1 << 5) | (1 << 4) | (1 << 3) | (1 << 0));
+ const u32 vc1 = DMI_VC_CFG(1, (1 << 1));
+ const u32 vcp = DMI_VC_CFG(2, (1 << 2));
+ const u32 vcm = DMI_VC_CFG(7, (1 << 7));
+ dmibar_write32(DMIVC0RCTL, vc0);
+ dmibar_write32(DMIVC1RCTL, vc1);
+ dmibar_write32(DMIVCPRCTL, vcp);
+ dmibar_write32(DMIVCMRCTL, vcm);
+
+ /* Set Extended VC Count (EVCC) to 1 if VC1 is active */
+ dmibar_clrsetbits8(DMIPVCCAP1, 7, !!(vc1 & VC_ACTIVE));
+
+ /*
+ * We also have to program the PCH side of the DMI link. Since both ends
+ * must use the same Virtual Channel settings, we pass them as arguments.
+ */
+ pch_dmi_tc_vc_mapping(vc0, vc1, vcp, vcm);
+
+ printk(BIOS_DEBUG, "Waiting for SA DMI VC negotiation... ");
+ do {} while (dmibar_read16(DMIVC0RSTS) & VCNEGPND);
+ do {} while (dmibar_read16(DMIVC1RSTS) & VCNEGPND);
+ do {} while (dmibar_read16(DMIVCPRSTS) & VCNEGPND);
+ do {} while (dmibar_read16(DMIVCMRSTS) & VCNEGPND);
+ printk(BIOS_DEBUG, "done!\n");
+}
+
+void dmi_early_init(void)
+{
+ dmi_setup_physical_layer();
+ dmi_tc_vc_mapping();
+}
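For reference, DMI_VC_CFG() above follows the PCIe VC Resource Control register layout: bit 31 enables the virtual channel, bits 26:24 hold the VC ID, and the low byte is the TC-to-VC bitmap. A small standalone sketch (not part of the patch; a hypothetical host-side program) that prints the values dmi_tc_vc_mapping() ends up writing:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the macros in early_dmi.c: bit 31 enables the VC, bits 26:24
       carry the VC ID, and the low byte is the TC-to-VC bitmap. */
    #define VC_ACTIVE               (1U << 31)
    #define DMI_VC_CFG(vcid, tcmap) (VC_ACTIVE | ((vcid) << 24) | (tcmap))

    int main(void)
    {
        /* Same mappings that dmi_tc_vc_mapping() programs into the SA and PCH */
        printf("VC0 0x%08x\n", DMI_VC_CFG(0, 0x79));    /* 0x80000079: TC0,3,4,5,6 */
        printf("VC1 0x%08x\n", DMI_VC_CFG(1, 1 << 1));  /* 0x81000002: TC1 */
        printf("VCp 0x%08x\n", DMI_VC_CFG(2, 1 << 2));  /* 0x82000004: TC2 */
        printf("VCm 0x%08x\n", DMI_VC_CFG(7, 1 << 7));  /* 0x87000080: TC7 */
        return 0;
    }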
diff --git a/src/northbridge/intel/haswell/early_pcie.c b/src/northbridge/intel/haswell/early_pcie.c
new file mode 100644
index 0000000000..d3940e3fac
--- /dev/null
+++ b/src/northbridge/intel/haswell/early_pcie.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <console/console.h>
+#include <device/pci_def.h>
+#include <device/pci_mmio_cfg.h>
+#include <device/pci_ops.h>
+#include <northbridge/intel/haswell/haswell.h>
+#include <northbridge/intel/haswell/vcu_mailbox.h>
+#include <types.h>
+
+#define PEG_DEV(func) PCI_DEV(0, 1, func)
+
+#define MAX_PEG_FUNC 3
+
+static void peg_dmi_unset_and_set_mask_pcicfg(
+ volatile union pci_bank *const bank,
+ const uint32_t offset,
+ const uint32_t unset_mask,
+ const uint32_t set_mask,
+ const uint32_t shift,
+ const bool valid)
+{
+ if (!valid)
+ return;
+
+ volatile uint32_t *const addr = &bank->reg32[offset / sizeof(uint32_t)];
+ clrsetbits32(addr, unset_mask << shift, set_mask << shift);
+}
+
+static void peg_dmi_unset_and_set_mask_common(
+ const bool is_peg,
+ const uint32_t offset,
+ const uint32_t unset,
+ const uint32_t set,
+ const uint32_t shift,
+ const bool valid)
+{
+ const uint32_t unset_mask = unset << shift;
+ const uint32_t set_mask = set << shift;
+ if (is_peg) {
+ for (uint8_t i = 0; i < MAX_PEG_FUNC; i++)
+ pci_update_config32(PEG_DEV(i), offset, ~unset_mask, set_mask);
+ } else {
+ dmibar_clrsetbits32(offset, unset_mask, set_mask);
+ }
+}
+
+static void peg_dmi_unset_and_set_mask_vcu_mmio(
+ const uint32_t addr,
+ const uint32_t unset_mask,
+ const uint32_t set_mask,
+ const uint32_t shift,
+ const bool valid)
+{
+ if (!valid)
+ return;
+
+ vcu_update_mmio(addr, ~(unset_mask << shift), set_mask << shift);
+}
+
+#define BUNDLE_STEP 0x20
+
+static void *const dmibar = (void *)(uintptr_t)CONFIG_FIXED_DMIBAR_MMIO_BASE;
+
+void peg_dmi_recipe(const bool is_peg, const pci_devfn_t dev)
+{
+ const bool always = true;
+ const bool is_dmi = !is_peg;
+
+ /* Treat DMIBAR and PEG devices the same way */
+ volatile union pci_bank *const bank = is_peg ? pci_map_bus(dev) : dmibar;
+
+ const size_t bundles = (is_peg ? 8 : 2) * BUNDLE_STEP;
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP) {
+ /* These are actually per-lane */
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0xa00 + i, 0x1f, 0x0c, 0, always);
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0xa10 + i, 0x1f, 0x0c, 0, always);
+ }
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x904 + i, 0x1f, 0x02, 0, is_peg);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x904 + i, 0x1f, 0x03, 5, is_peg);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x90c + i, 0x3f, 0x09, 5, always);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x90c + i, 0x0f, 0x05, 21, is_peg);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x910 + i, 0x0f, 0x08, 6, is_peg);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x910 + i, 0x0f, 0x00, 10, always);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x910 + i, 0x07, 0x00, 18, always);
+
+ peg_dmi_unset_and_set_mask_vcu_mmio(0x0c008001, 0x1f, 0x03, 25, is_peg);
+ peg_dmi_unset_and_set_mask_vcu_mmio(0x0c0c8001, 0x3f, 0x00, 23, is_dmi);
+
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0xc28, 0x1f, 0x13, 18, always);
+
+ peg_dmi_unset_and_set_mask_common(is_peg, 0xc38, 0x01, 0x00, 6, always);
+ peg_dmi_unset_and_set_mask_common(is_peg, 0x260, 0x03, 0x02, 0, always);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x900 + i, 0x03, 0x00, 26, always);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x904 + i, 0x03, 0x03, 10, always);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x90c + i, 0x1f, 0x07, 25, is_peg);
+
+ for (size_t i = 0; i < bundles; i += BUNDLE_STEP)
+ peg_dmi_unset_and_set_mask_pcicfg(bank, 0x91c + i, 0x07, 0x05, 27, is_peg);
+}
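All three peg_dmi_unset_and_set_mask_*() helpers above perform the same read-modify-write: clear unset_mask << shift, then OR in set_mask << shift; only the access path (PEG PCI config space, DMIBAR, or the VCU mailbox) differs. A standalone sketch of that operation on a plain variable (illustrative only; the values are borrowed from the "0x90c + i" step above):

    #include <stdint.h>
    #include <stdio.h>

    /* The operation clrsetbits32()/pci_update_config32() apply to a register,
       shown here on a local variable. */
    static uint32_t unset_and_set_mask(uint32_t reg, uint32_t unset,
                                       uint32_t set, uint32_t shift)
    {
        return (reg & ~(unset << shift)) | (set << shift);
    }

    int main(void)
    {
        /* The "0x90c + i" step: clear the 6-bit field at bit 5, write 0x09 */
        uint32_t reg = 0xffffffff;
        reg = unset_and_set_mask(reg, 0x3f, 0x09, 5);
        printf("0x%08x\n", reg);    /* 0xfffff93f: bits [10:5] now hold 0x09 */
        return 0;
    }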
diff --git a/src/northbridge/intel/haswell/haswell.h b/src/northbridge/intel/haswell/haswell.h
index 1b29f6baf0..30b4abd0a7 100644
--- a/src/northbridge/intel/haswell/haswell.h
+++ b/src/northbridge/intel/haswell/haswell.h
@@ -34,6 +34,9 @@ void haswell_early_initialization(void);
void haswell_late_initialization(void);
void haswell_unhide_peg(void);
+void dmi_early_init(void);
+void peg_dmi_recipe(const bool is_peg, const pci_devfn_t dev);
+
void report_platform_info(void);
struct acpi_rsdp;
diff --git a/src/northbridge/intel/haswell/native_raminit/raminit_native.c b/src/northbridge/intel/haswell/native_raminit/raminit_native.c
index 1aafdf8659..0938e026e3 100644
--- a/src/northbridge/intel/haswell/native_raminit/raminit_native.c
+++ b/src/northbridge/intel/haswell/native_raminit/raminit_native.c
@@ -1,7 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <console/console.h>
+#include <northbridge/intel/haswell/haswell.h>
#include <northbridge/intel/haswell/raminit.h>
+#include <types.h>
+
+static bool early_init_native(int s3resume)
+{
+ printk(BIOS_DEBUG, "Starting native platform initialisation\n");
+
+ if (!CONFIG(INTEL_LYNXPOINT_LP))
+ dmi_early_init();
+
+ return false;
+}
void perform_raminit(const int s3resume)
{
@@ -9,6 +21,9 @@ void perform_raminit(const int s3resume)
* See, this function's name is a lie. There are more things to
 * do than memory initialisation, but they are relatively easy.
*/
+ const bool cpu_replaced = early_init_native(s3resume);
+
+ (void)cpu_replaced;
/** TODO: Implement the required magic **/
die("NATIVE RAMINIT: More Magic (tm) required.\n");
diff --git a/src/northbridge/intel/haswell/vcu_mailbox.c b/src/northbridge/intel/haswell/vcu_mailbox.c
new file mode 100644
index 0000000000..196b720c82
--- /dev/null
+++ b/src/northbridge/intel/haswell/vcu_mailbox.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <assert.h>
+#include <console/console.h>
+#include <delay.h>
+#include <northbridge/intel/haswell/haswell.h>
+#include <northbridge/intel/haswell/vcu_mailbox.h>
+#include <types.h>
+
+/*
+ * This is a library for the VCU (Validation Control Unit) mailbox. This
+ * mailbox is primarily used to adjust some magic PCIe tuning parameters.
+ *
+ * There are two revisions of the VCU mailbox. Rev1 is specific to Haswell
+ * stepping A0, and all other steppings use Rev2. Haswell stepping A0 CPUs
+ * are early Engineering Samples with undocumented errata, and most likely
+ * need special microcode updates to boot. Thus, the code does not support
+ * VCU mailbox Rev1, because no one should need it anymore.
+ */
+
+#define VCU_MAILBOX_INTERFACE 0x6c00
+#define VCU_MAILBOX_DATA 0x6c04
+
+#define VCU_RUN_BUSY (1U << 31)
+
+enum vcu_opcode {
+ VCU_INVALID_OPCODE = 0x00,
+ VCU_OPCODE_READ_VCU_API_VER_ID = 0x01,
+ VCU_OPCODE_OPEN_SEQ = 0x02,
+ VCU_OPCODE_CLOSE_SEQ = 0x03,
+ VCU_OPCODE_READ_DATA = 0x07,
+ VCU_OPCODE_WRITE_DATA = 0x08,
+ VCU_OPCODE_READ_CSR = 0x13,
+ VCU_OPCODE_WRITE_CSR = 0x14,
+ VCU_OPCODE_READ_MMIO = 0x15,
+ VCU_OPCODE_WRITE_MMIO = 0x16,
+};
+
+enum vcu_sequence {
+ SEQ_ID_READ_CSR = 0x1,
+ SEQ_ID_WRITE_CSR = 0x2,
+ SEQ_ID_READ_MMIO = 0x3,
+ SEQ_ID_WRITE_MMIO = 0x4,
+};
+
+#define VCU_RESPONSE_MASK 0xffff
+#define VCU_RESPONSE_SUCCESS 0x40
+#define VCU_RESPONSE_BUSY 0x80
+#define VCU_RESPONSE_THREAD_UNAVAILABLE 0x82
+#define VCU_RESPONSE_ILLEGAL 0x90
+
+/* FIXME: Use timer API */
+static void send_vcu_command(const enum vcu_opcode opcode, const uint32_t data)
+{
+ if (opcode == VCU_INVALID_OPCODE)
+ return;
+
+ for (unsigned int i = 0; i < 10; i++) {
+ mchbar_write32(VCU_MAILBOX_DATA, data);
+ mchbar_write32(VCU_MAILBOX_INTERFACE, opcode | VCU_RUN_BUSY);
+ uint32_t vcu_interface;
+ for (unsigned int j = 0; j < 100; j++) {
+ vcu_interface = mchbar_read32(VCU_MAILBOX_INTERFACE);
+ if (!(vcu_interface & VCU_RUN_BUSY))
+ break;
+
+ udelay(10);
+ }
+ if (vcu_interface & VCU_RUN_BUSY)
+ continue;
+
+ if ((vcu_interface & VCU_RESPONSE_MASK) == VCU_RESPONSE_SUCCESS)
+ return;
+ }
+ printk(BIOS_ERR, "VCU: Failed to send command\n");
+}
+
+static enum vcu_opcode get_register_opcode(enum vcu_sequence seq)
+{
+ switch (seq) {
+ case SEQ_ID_READ_CSR:
+ return VCU_OPCODE_READ_CSR;
+ case SEQ_ID_WRITE_CSR:
+ return VCU_OPCODE_WRITE_CSR;
+ case SEQ_ID_READ_MMIO:
+ return VCU_OPCODE_READ_MMIO;
+ case SEQ_ID_WRITE_MMIO:
+ return VCU_OPCODE_WRITE_MMIO;
+ default:
+ BUG();
+ return VCU_INVALID_OPCODE;
+ }
+}
+
+static enum vcu_opcode get_data_opcode(enum vcu_sequence seq)
+{
+ switch (seq) {
+ case SEQ_ID_READ_CSR:
+ case SEQ_ID_READ_MMIO:
+ return VCU_OPCODE_READ_DATA;
+ case SEQ_ID_WRITE_CSR:
+ case SEQ_ID_WRITE_MMIO:
+ return VCU_OPCODE_WRITE_DATA;
+ default:
+ BUG();
+ return VCU_INVALID_OPCODE;
+ }
+}
+
+static uint32_t send_vcu_sequence(uint32_t addr, enum vcu_sequence seq, uint32_t wr_data)
+{
+ send_vcu_command(VCU_OPCODE_OPEN_SEQ, seq);
+
+ send_vcu_command(get_register_opcode(seq), addr);
+
+ send_vcu_command(get_data_opcode(seq), wr_data);
+
+ const uint32_t rd_data = mchbar_read32(VCU_MAILBOX_DATA);
+
+ send_vcu_command(VCU_OPCODE_CLOSE_SEQ, seq);
+
+ return rd_data;
+}
+
+#define VCU_WRITE_IGNORED 0
+
+uint32_t vcu_read_csr(uint32_t addr)
+{
+ return send_vcu_sequence(addr, SEQ_ID_READ_CSR, VCU_WRITE_IGNORED);
+}
+
+void vcu_write_csr(uint32_t addr, uint32_t data)
+{
+ send_vcu_sequence(addr, SEQ_ID_WRITE_CSR, data);
+}
+
+void vcu_update_csr(uint32_t addr, uint32_t andvalue, uint32_t orvalue)
+{
+ vcu_write_csr(addr, (vcu_read_csr(addr) & andvalue) | orvalue);
+}
+
+uint32_t vcu_read_mmio(uint32_t addr)
+{
+ return send_vcu_sequence(addr, SEQ_ID_READ_MMIO, VCU_WRITE_IGNORED);
+}
+
+void vcu_write_mmio(uint32_t addr, uint32_t data)
+{
+ send_vcu_sequence(addr, SEQ_ID_WRITE_MMIO, data);
+}
+
+void vcu_update_mmio(uint32_t addr, uint32_t andvalue, uint32_t orvalue)
+{
+ vcu_write_mmio(addr, (vcu_read_mmio(addr) & andvalue) | orvalue);
+}
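One detail worth noting for callers: vcu_update_csr()/vcu_update_mmio() take an AND mask, not a clear mask as clrsetbits32() does, which is why peg_dmi_unset_and_set_mask_vcu_mmio() passes ~(unset_mask << shift). A hedged usage sketch with mocked accessors (the *_mock names and the register contents are invented here; only the AND-then-OR convention comes from the code above):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a VCU-reachable register so the convention can be shown
       without hardware; the real accessors go through the MCHBAR mailbox. */
    static uint32_t fake_vcu_reg = 0xdeadbeef;

    static uint32_t vcu_read_mmio_mock(uint32_t addr)
    {
        (void)addr;
        return fake_vcu_reg;
    }

    static void vcu_write_mmio_mock(uint32_t addr, uint32_t data)
    {
        (void)addr;
        fake_vcu_reg = data;
    }

    /* Same read-modify-write as vcu_update_mmio(): AND first, then OR */
    static void vcu_update_mmio_mock(uint32_t addr, uint32_t andv, uint32_t orv)
    {
        vcu_write_mmio_mock(addr, (vcu_read_mmio_mock(addr) & andv) | orv);
    }

    int main(void)
    {
        /* Equivalent of the 0x0c008001 step in peg_dmi_recipe():
           clear the 5-bit field at bit 25, then write 0x03 into it. */
        vcu_update_mmio_mock(0x0c008001, ~(0x1fU << 25), 0x03U << 25);
        printf("0x%08x\n", fake_vcu_reg);    /* 0xc6adbeef */
        return 0;
    }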
diff --git a/src/northbridge/intel/haswell/vcu_mailbox.h b/src/northbridge/intel/haswell/vcu_mailbox.h
new file mode 100644
index 0000000000..ba0a62e486
--- /dev/null
+++ b/src/northbridge/intel/haswell/vcu_mailbox.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef HASWELL_VCU_MAILBOX_H
+#define HASWELL_VCU_MAILBOX_H
+
+#include <stdint.h>
+
+uint32_t vcu_read_csr(uint32_t addr);
+void vcu_write_csr(uint32_t addr, uint32_t data);
+void vcu_update_csr(uint32_t addr, uint32_t andvalue, uint32_t orvalue);
+
+uint32_t vcu_read_mmio(uint32_t addr);
+void vcu_write_mmio(uint32_t addr, uint32_t data);
+void vcu_update_mmio(uint32_t addr, uint32_t andvalue, uint32_t orvalue);
+
+#endif /* HASWELL_VCU_MAILBOX_H */
diff --git a/src/southbridge/intel/lynxpoint/Makefile.inc b/src/southbridge/intel/lynxpoint/Makefile.inc
index 08dd6dd299..49f03f1c81 100644
--- a/src/southbridge/intel/lynxpoint/Makefile.inc
+++ b/src/southbridge/intel/lynxpoint/Makefile.inc
@@ -35,6 +35,8 @@ bootblock-y += early_pch.c
romstage-y += early_usb.c early_me.c me_status.c early_pch.c
romstage-y += pmutil.c
+romstage-$(CONFIG_USE_NATIVE_RAMINIT) += early_pch_native.c
+
ifeq ($(CONFIG_INTEL_LYNXPOINT_LP),y)
romstage-y += lp_gpio.c
ramstage-y += lp_gpio.c
diff --git a/src/southbridge/intel/lynxpoint/early_pch_native.c b/src/southbridge/intel/lynxpoint/early_pch_native.c
new file mode 100644
index 0000000000..5ddaba01fb
--- /dev/null
+++ b/src/southbridge/intel/lynxpoint/early_pch_native.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <console/console.h>
+#include <device/pci_ops.h>
+#include <southbridge/intel/lynxpoint/pch.h>
+#include <types.h>
+
+void pch_dmi_setup_physical_layer(void)
+{
+ /** FIXME: We need to make sure the SA supports Gen2 as well **/
+ if ((RCBA32(0x21a4) & 0x0f) == 0x02) {
+ /* Set Gen 2 Common Clock N_FTS */
+ RCBA32_AND_OR(0x2340, ~0x00ff0000, 0x3a << 16);
+
+ /* Set Target Link Speed to DMI Gen2 */
+ RCBA8_AND_OR(DLCTL2, ~0x07, 0x02);
+ }
+}
+
+#define VC_ACTIVE (1U << 31)
+
+#define VCNEGPND (1 << 1)
+
+void pch_dmi_tc_vc_mapping(const u32 vc0, const u32 vc1, const u32 vcp, const u32 vcm)
+{
+ printk(BIOS_DEBUG, "Programming PCH DMI VC/TC mappings...\n");
+
+ RCBA32_AND_OR(CIR0050, ~(0xf << 20), 2 << 20);
+ if (vcp & VC_ACTIVE)
+ RCBA32_OR(CIR0050, 1 << 19 | 1 << 17);
+
+ RCBA32(CIR0050); /* Ensure posted write hits */
+
+ /* Use the same virtual channel mapping on both ends of the DMI link */
+ RCBA32(V0CTL) = vc0;
+ RCBA32(V1CTL) = vc1;
+ RCBA32(V1CTL); /* Ensure posted write hits */
+ RCBA32(VPCTL) = vcp;
+ RCBA32(VPCTL); /* Ensure posted write hits */
+ RCBA32(VMCTL) = vcm;
+
+ /* Lock the registers */
+ RCBA32_OR(CIR0050, 1U << 31);
+ RCBA32(CIR0050); /* Ensure posted write hits */
+
+ printk(BIOS_DEBUG, "Waiting for PCH DMI VC negotiation... ");
+ do {} while (RCBA16(V0STS) & VCNEGPND);
+ do {} while (RCBA16(V1STS) & VCNEGPND);
+ do {} while (RCBA16(VPSTS) & VCNEGPND);
+ do {} while (RCBA16(VMSTS) & VCNEGPND);
+ printk(BIOS_DEBUG, "done!\n");
+}
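The VCNEGPND polls above, like those in early_dmi.c, spin until the hardware clears the bit. If a bounded wait were preferred, a sketch along these lines would fit, reusing only the udelay()/printk() calls the patch already makes elsewhere (the helper name and the 10000 x 10 us budget are made up for illustration, not validated values):

    /* Hypothetical bounded poll; not part of the patch */
    static bool pch_dmi_vc_negotiation_done(uint16_t sts_reg)
    {
        /* Roughly 100 ms worst case: 10000 iterations x 10 us */
        for (unsigned int i = 0; i < 10000; i++) {
            if (!(RCBA16(sts_reg) & VCNEGPND))
                return true;
            udelay(10);
        }
        printk(BIOS_ERR, "PCH DMI VC negotiation timed out\n");
        return false;
    }

Each existing do {} while loop would then become a call such as pch_dmi_vc_negotiation_done(V0STS), with the caller deciding how to handle a timeout.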
diff --git a/src/southbridge/intel/lynxpoint/pch.h b/src/southbridge/intel/lynxpoint/pch.h
index 35649f600b..f833706164 100644
--- a/src/southbridge/intel/lynxpoint/pch.h
+++ b/src/southbridge/intel/lynxpoint/pch.h
@@ -112,6 +112,9 @@ enum pch_platform_type {
PCH_TYPE_ULT = 5,
};
+void pch_dmi_setup_physical_layer(void);
+void pch_dmi_tc_vc_mapping(u32 vc0, u32 vc1, u32 vcp, u32 vcm);
+
void usb_ehci_sleep_prepare(pci_devfn_t dev, u8 slp_typ);
void usb_ehci_disable(pci_devfn_t dev);
void usb_xhci_sleep_prepare(pci_devfn_t dev, u8 slp_typ);
@@ -405,9 +408,10 @@ void mainboard_config_rcba(void);
/* Southbridge IO BARs */
+#define PMBASE 0x40
#define GPIOBASE 0x48
-#define PMBASE 0x40
+#define CIR0050 0x0050 /* 32bit */
#define RPC 0x0400 /* 32bit */
#define RPFN 0x0404 /* 32bit */
@@ -430,6 +434,20 @@ void mainboard_config_rcba(void);
#define IOTR2 0x1e90 /* 64bit */
#define IOTR3 0x1e98 /* 64bit */
+#define V0CTL 0x2014 /* 32bit */
+#define V0STS 0x201a /* 16bit */
+
+#define V1CTL 0x2020 /* 32bit */
+#define V1STS 0x2026 /* 16bit */
+
+#define VPCTL 0x2030 /* 32bit */
+#define VPSTS 0x2038 /* 16bit */
+
+#define VMCTL 0x2040 /* 32bit */
+#define VMSTS 0x2048 /* 16bit */
+
+#define DLCTL2 0x21b0
+
#define TCTL 0x3000 /* 8bit */
#define NOINT 0