author     Jason Schildt <jschildt@gmail.com>    2005-08-10 15:16:44 +0000
committer  Jason Schildt <jschildt@gmail.com>    2005-08-10 15:16:44 +0000
commit     043b409904c8663a2df1f651a91da6366eff6c9b (patch)
tree       43be72a4df1429321c87e3c65c372e8ab1d45347 /src/northbridge
parent     27b85118807be6e2fbf9bbd65d119538ff276db8 (diff)
Undoing all HDAMA commits from LNXI from r2005->2003
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2006 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/northbridge')
-rw-r--r--  src/northbridge/amd/amdk8/amdk8.h              |   1
-rw-r--r--  src/northbridge/amd/amdk8/coherent_ht.c        |  93
-rw-r--r--  src/northbridge/amd/amdk8/cpu_rev.c            |  33
-rw-r--r--  src/northbridge/amd/amdk8/debug.c              |  32
-rw-r--r--  src/northbridge/amd/amdk8/early_ht.c           |   8
-rw-r--r--  src/northbridge/amd/amdk8/incoherent_ht.c      | 544
-rw-r--r--  src/northbridge/amd/amdk8/northbridge.c        | 213
-rw-r--r--  src/northbridge/amd/amdk8/raminit.c            | 156
-rw-r--r--  src/northbridge/amd/amdk8/setup_resource_map.c | 149
9 files changed, 861 insertions(+), 368 deletions(-)
diff --git a/src/northbridge/amd/amdk8/amdk8.h b/src/northbridge/amd/amdk8/amdk8.h
index 89c03fc16b..ca8e8dc3d2 100644
--- a/src/northbridge/amd/amdk8/amdk8.h
+++ b/src/northbridge/amd/amdk8/amdk8.h
@@ -136,7 +136,6 @@
#define DCL_DisInRcvrs (1<<24)
#define DCL_BypMax_SHIFT 25
#define DCL_En2T (1<<28)
-#define DCL_UpperCSMap (1<<29)
#define DRAM_CONFIG_HIGH 0x94
#define DCH_ASYNC_LAT_SHIFT 0
#define DCH_ASYNC_LAT_MASK 0xf
diff --git a/src/northbridge/amd/amdk8/coherent_ht.c b/src/northbridge/amd/amdk8/coherent_ht.c
index 75c9910b92..c79a432ab5 100644
--- a/src/northbridge/amd/amdk8/coherent_ht.c
+++ b/src/northbridge/amd/amdk8/coherent_ht.c
@@ -155,6 +155,23 @@ static void disable_probes(void)
}
+#ifndef ENABLE_APIC_EXT_ID
+#define ENABLE_APIC_EXT_ID 0
+#endif
+
+static void enable_apic_ext_id(u8 node)
+{
+#if ENABLE_APIC_EXT_ID==1
+#warning "FIXME Is this the right place to enable apic ext id?"
+
+ u32 val;
+
+ val = pci_read_config32(NODE_HT(node), 0x68);
+ val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
+ pci_write_config32(NODE_HT(node), 0x68, val);
+#endif
+}
+
static void enable_routing(u8 node)
{
u32 val;
@@ -275,26 +292,26 @@ static int verify_connection(u8 dest)
return 1;
}
-static unsigned read_freq_cap(device_t dev, unsigned pos)
+static uint16_t read_freq_cap(device_t dev, uint8_t pos)
{
/* Handle bugs in valid hypertransport frequency reporting */
- unsigned freq_cap;
+ uint16_t freq_cap;
uint32_t id;
freq_cap = pci_read_config16(dev, pos);
freq_cap &= ~(1 << HT_FREQ_VENDOR); /* Ignore Vendor HT frequencies */
+#if K8_HT_FREQ_1G_SUPPORT == 1
if (!is_cpu_pre_e0()) {
return freq_cap;
}
+#endif
id = pci_read_config32(dev, 0);
/* AMD K8 Unsupported 1Ghz? */
if (id == (PCI_VENDOR_ID_AMD | (0x1100 << 16))) {
- if (is_cpu_pre_e0()) {
- freq_cap &= ~(1 << HT_FREQ_1000Mhz);
- }
+ freq_cap &= ~(1 << HT_FREQ_1000Mhz);
}
return freq_cap;
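For reference, the capability word read here is a per-frequency bitmask: each HT_FREQ_* name from hypertransport_def.h is a bit position, and clearing a bit hides that link speed from the later selection in optimize_connection. A minimal sketch of the filtering pattern (the helper name is illustrative, not part of the source):

/* Sketch: filter an HT link frequency capability mask.
 * Assumes HT_FREQ_* are bit positions as used above. */
static uint16_t filter_freq_cap(uint16_t cap, int allow_1ghz)
{
	cap &= ~(1 << HT_FREQ_VENDOR);          /* never pick vendor-specific rates */
	if (!allow_1ghz)
		cap &= ~(1 << HT_FREQ_1000Mhz); /* pre-E0 K8 cannot run 1 GHz links */
	return cap;
}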
@@ -322,10 +339,8 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* See if I am changing the link frequency */
old_freq = pci_read_config8(node1, link1 + PCI_HT_CAP_HOST_FREQ);
- old_freq &= 0x0f;
needs_reset |= old_freq != freq;
old_freq = pci_read_config8(node2, link2 + PCI_HT_CAP_HOST_FREQ);
- old_freq &= 0x0f;
needs_reset |= old_freq != freq;
/* Set the Calculated link frequency */
@@ -367,6 +382,7 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* Set node2's widths */
pci_write_config8(node2, link2 + PCI_HT_CAP_HOST_WIDTH + 1, width);
+
return needs_reset;
}
@@ -1609,9 +1625,9 @@ static void clear_dead_routes(unsigned nodes)
}
#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
-static unsigned count_cpus(unsigned nodes)
-{
#if CONFIG_LOGICAL_CPUS==1
+static unsigned verify_dualcore(unsigned nodes)
+{
unsigned node, totalcpus, tmp;
totalcpus = 0;
@@ -1621,21 +1637,25 @@ static unsigned count_cpus(unsigned nodes)
}
return totalcpus;
-#else
- return nodes;
-#endif
}
+#endif
static void coherent_ht_finalize(unsigned nodes)
{
- unsigned total_cpus;
- unsigned cpu_node_count;
unsigned node;
int rev_a0;
- total_cpus = count_cpus(nodes);
- cpu_node_count = ((total_cpus -1)<<16)|((nodes - 1) << 4);
+#if CONFIG_LOGICAL_CPUS==1
+ unsigned total_cpus;
+ if(read_option(CMOS_VSTART_dual_core, CMOS_VLEN_dual_core, 0) == 0) { /* dual_core */
+ total_cpus = verify_dualcore(nodes);
+ }
+ else {
+ total_cpus = nodes;
+ }
+#endif
+
/* set up cpu count and node count and enable Limit
* Config Space Range for all available CPUs.
* Also clear non coherent hypertransport bus range
@@ -1652,7 +1672,11 @@ static void coherent_ht_finalize(unsigned nodes)
/* Set the Total CPU and Node count in the system */
val = pci_read_config32(dev, 0x60);
val &= (~0x000F0070);
- val |= cpu_node_count;
+#if CONFIG_LOGICAL_CPUS==1
+ val |= ((total_cpus-1)<<16)|((nodes-1)<<4);
+#else
+ val |= ((nodes-1)<<16)|((nodes-1)<<4);
+#endif
pci_write_config32(dev, 0x60, val);
/* Only respond to real cpu pci configuration cycles
@@ -1762,33 +1786,6 @@ static int optimize_link_read_pointers(unsigned nodes, int needs_reset)
return needs_reset;
}
-static void startup_other_cores(unsigned nodes)
-{
- unsigned node;
- for(node = 0; node < nodes; node++) {
- device_t dev;
- unsigned siblings;
- dev = NODE_MC(node);
- siblings = (pci_read_config32(dev, 0xe8) >> 12) & 0x3;
-
- if (siblings) {
- device_t dev_f0;
- unsigned val;
- /* Redirect all MC4 accesses and error logging to core0 */
- val = pci_read_config32(dev, 0x44);
- val |= (1 << 27); //NbMcaToMstCpuEn bit
- pci_write_config32(dev, 0x44, val);
-
- dev_f0 = NODE_HT(node);
- /* Enable extended apic id's and second core */
- val = pci_read_config32(dev_f0, 0x68);
- val |= (1 << 18) | (1 << 17) | ( 1 << 5);
- pci_write_config32(dev_f0, 0x68, val);
- }
- }
-}
-
-
static int setup_coherent_ht_domain(void)
{
struct setup_smp_result result;
@@ -1802,15 +1799,15 @@ static int setup_coherent_ht_domain(void)
enable_bsp_routing();
#if CONFIG_MAX_PHYSICAL_CPUS > 1
- result = setup_smp();
+ result = setup_smp();
+ result.nodes = verify_mp_capabilities(result.nodes);
+ clear_dead_routes(result.nodes);
#endif
- result.nodes = verify_mp_capabilities(result.nodes);
- clear_dead_routes(result.nodes);
+
if (result.nodes == 1) {
setup_uniprocessor();
}
coherent_ht_finalize(result.nodes);
- startup_other_cores(result.nodes);
result.needs_reset = apply_cpu_errata_fixes(result.nodes, result.needs_reset);
result.needs_reset = optimize_link_read_pointers(result.nodes, result.needs_reset);
return result.needs_reset;
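The register-0x60 update in coherent_ht_finalize() above packs two fields of the K8 Node ID register: judging from the ~0x000F0070 mask, bits [6:4] hold the node count minus one and bits [19:16] the CPU count minus one. A hedged helper restating that encoding (name illustrative):

/* Sketch: encode CPU and node counts for K8 function-0 register 0x60. */
static uint32_t encode_cpu_node_count(uint32_t val, unsigned total_cpus, unsigned nodes)
{
	val &= ~0x000F0070;             /* clear CpuCnt [19:16] and NodeCnt [6:4] */
	val |= (total_cpus - 1) << 16;  /* CpuCnt  = number of CPUs - 1  */
	val |= (nodes - 1) << 4;        /* NodeCnt = number of nodes - 1 */
	return val;
}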
diff --git a/src/northbridge/amd/amdk8/cpu_rev.c b/src/northbridge/amd/amdk8/cpu_rev.c
index c76efa2f39..1eb47f85f5 100644
--- a/src/northbridge/amd/amdk8/cpu_rev.c
+++ b/src/northbridge/amd/amdk8/cpu_rev.c
@@ -3,10 +3,26 @@ static int is_cpu_rev_a0(void)
{
return (cpuid_eax(1) & 0xfffef) == 0x0f00;
}
+//AMD_D0_SUPPORT
+static int is_cpu_pre_d0(void)
+{
+ return (cpuid_eax(1) & 0xfff0f) < 0x10f00;
+}
-static int is_cpu_pre_b3(void)
+static int is_cpu_d0(void)
{
- return (cpuid_eax(1) & 0xfffef) < 0x0f41;
+ return (cpuid_eax(1) & 0xfff0f) == 0x10f00;
+}
+
+//AMD_E0_SUPPORT
+static int is_cpu_pre_e0(void)
+{
+ return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
+}
+
+static int is_cpu_e0(void)
+{
+ return (cpuid_eax(1) & 0xfff00) == 0x20f00;
}
static int is_cpu_pre_c0(void)
@@ -14,12 +30,17 @@ static int is_cpu_pre_c0(void)
return (cpuid_eax(1) & 0xfffef) < 0x0f48;
}
-static int is_cpu_pre_d0(void)
+static int is_cpu_c0(void)
{
- return (cpuid_eax(1) & 0xfff0f) < 0x10000;
+ return (cpuid_eax(1) & 0xfffef) == 0x0f48;
}
-static int is_cpu_pre_e0(void)
+static int is_cpu_pre_b3(void)
{
- return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
+ return (cpuid_eax(1) & 0xfffef) < 0x0f41;
+}
+
+static int is_cpu_b3(void)
+{
+ return (cpuid_eax(1) & 0xfffef) == 0x0f41;
}
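All of these revision checks key off CPUID function 1, where EAX carries stepping [3:0], model [7:4], family [11:8], extended model [19:16] and extended family [27:20]; the 0xfff0f/0xfffef masks keep the stepping, family and extended-model fields (0xfffef also keeps part of the model) so that K8 revisions compare numerically (B3 at 0x0f41, C0 at 0x0f48, D0 at 0x10f00, E0 at 0x20f00). An illustrative decoder, assuming the same cpuid_eax() and printk_debug() helpers used elsewhere in this tree:

/* Sketch: print the CPUID fn1 signature fields behind the is_cpu_* checks. */
static void print_cpu_signature(void)
{
	uint32_t eax = cpuid_eax(1);
	printk_debug("family %x ext_model %x model %x stepping %x (masked id %05x)\r\n",
		(eax >> 8) & 0xf, (eax >> 16) & 0xf, (eax >> 4) & 0xf, eax & 0xf,
		eax & 0xfff0f);
}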
diff --git a/src/northbridge/amd/amdk8/debug.c b/src/northbridge/amd/amdk8/debug.c
index 861ad8c38a..d0841e878e 100644
--- a/src/northbridge/amd/amdk8/debug.c
+++ b/src/northbridge/amd/amdk8/debug.c
@@ -5,12 +5,16 @@
#if 1
static void print_debug_pci_dev(unsigned dev)
{
+#if CONFIG_USE_INIT
+ printk_debug("PCI: %02x:%02x.%02x", (dev>>16) & 0xff, (dev>>11) & 0x1f, (dev>>8) & 0x7);
+#else
print_debug("PCI: ");
print_debug_hex8((dev >> 16) & 0xff);
print_debug_char(':');
print_debug_hex8((dev >> 11) & 0x1f);
print_debug_char('.');
print_debug_hex8((dev >> 8) & 7);
+#endif
}
static void print_pci_devices(void)
@@ -27,7 +31,19 @@ static void print_pci_devices(void)
continue;
}
print_debug_pci_dev(dev);
+#if CONFIG_USE_INIT
+ printk_debug(" %04x:%04x\r\n", (id & 0xffff), (id>>16));
+#else
+ print_debug_hex32(id);
print_debug("\r\n");
+#endif
+ if(((dev>>8) & 0x07) == 0) {
+ uint8_t hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
}
}
@@ -72,6 +88,14 @@ static void dump_pci_devices(void)
continue;
}
dump_pci_device(dev);
+
+ if(((dev>>8) & 0x07) == 0) {
+ uint8_t hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
}
}
@@ -89,6 +113,14 @@ static void dump_pci_devices_on_bus(unsigned busn)
continue;
}
dump_pci_device(dev);
+
+ if(((dev>>8) & 0x07) == 0) {
+ uint8_t hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
}
}
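The block added to each of these loops skips functions 1-7 of single-function devices: bit 7 of the PCI header type is the multi-function flag, and adding PCI_DEV(0,0,7) pushes the devfn-encoded handle past the remaining functions before the loop's own increment. A hedged restatement (helper name illustrative):

/* Sketch: advance past a single-function device after probing its function 0. */
static unsigned skip_single_function(unsigned dev)
{
	if (((dev >> 8) & 0x07) == 0) {                   /* only test on function 0 */
		uint8_t hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
		if ((hdr_type & 0x80) != 0x80)            /* bit 7 clear: single function */
			dev += PCI_DEV(0, 0, 7);          /* jump over functions 1..7 */
	}
	return dev;
}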
diff --git a/src/northbridge/amd/amdk8/early_ht.c b/src/northbridge/amd/amdk8/early_ht.c
index 9f33e00127..2711657455 100644
--- a/src/northbridge/amd/amdk8/early_ht.c
+++ b/src/northbridge/amd/amdk8/early_ht.c
@@ -23,6 +23,14 @@ static int enumerate_ht_chain(void)
break;
}
+#if CK804_DEVN_BASE==0
+ //CK804 workaround:
+	// the CK804 does not tolerate UnitID changes
+ if(id == 0x005e10de) {
+ break;
+ }
+#endif
+
hdr_type = pci_read_config8(PCI_DEV(0,0,0), PCI_HEADER_TYPE);
pos = 0;
hdr_type &= 0x7f;
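The magic constant 0x005e10de compared above is the 32-bit dword at config offset 0: vendor ID 0x10de (NVIDIA) in the low half and device ID 0x005e (CK804) in the high half. An illustrative check of the same dword (the helper name is not from the source):

/* Sketch: recognise the NVIDIA CK804 from the combined vendor/device dword. */
static int is_ck804(uint32_t id)
{
	uint16_t vendor = id & 0xffff;   /* low 16 bits:  vendor ID */
	uint16_t device = id >> 16;      /* high 16 bits: device ID */
	return (vendor == 0x10de) && (device == 0x005e);
}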
diff --git a/src/northbridge/amd/amdk8/incoherent_ht.c b/src/northbridge/amd/amdk8/incoherent_ht.c
index 0de5d1280b..d4271efbda 100644
--- a/src/northbridge/amd/amdk8/incoherent_ht.c
+++ b/src/northbridge/amd/amdk8/incoherent_ht.c
@@ -1,38 +1,55 @@
+/*
+ This should be done by Eric
+	2004.12 yhlu: added dynamic multi-HT-chain support
+*/
#include <device/pci_def.h>
#include <device/pci_ids.h>
#include <device/hypertransport_def.h>
-static inline void print_linkn_in (const char *strval, unsigned byteval)
+#ifndef K8_HT_FREQ_1G_SUPPORT
+ #define K8_HT_FREQ_1G_SUPPORT 0
+#endif
+
+#ifndef K8_SCAN_PCI_BUS
+ #define K8_SCAN_PCI_BUS 0
+#endif
+
+static inline void print_linkn_in (const char *strval, uint8_t byteval)
{
- print_debug(strval); print_debug_hex8(byteval); print_debug("\r\n");
+#if 1
+#if CONFIG_USE_INIT
+ printk_debug("%s%02x\r\n", strval, byteval);
+#else
+ print_debug(strval); print_debug_hex8(byteval); print_debug("\r\n");
+#endif
+#endif
}
-static unsigned ht_lookup_slave_capability(device_t dev)
+static uint8_t ht_lookup_capability(device_t dev, uint16_t val)
{
- unsigned pos;
- unsigned hdr_type;
+ uint8_t pos;
+ uint8_t hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
pos = 0;
hdr_type &= 0x7f;
if ((hdr_type == PCI_HEADER_TYPE_NORMAL) ||
- (hdr_type == PCI_HEADER_TYPE_BRIDGE))
- {
+ (hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
pos = PCI_CAPABILITY_LIST;
}
if (pos > PCI_CAP_LIST_NEXT) {
pos = pci_read_config8(dev, pos);
}
while(pos != 0) { /* loop through the linked list */
- unsigned cap;
+ uint8_t cap;
cap = pci_read_config8(dev, pos + PCI_CAP_LIST_ID);
if (cap == PCI_CAP_ID_HT) {
- unsigned flags;
+ uint16_t flags;
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
- if ((flags >> 13) == 0) {
- /* Entry is a Slave secondary, success... */
+ if ((flags >> 13) == val) {
+			/* Entry is a slave or host, success... */
break;
}
}
@@ -41,24 +58,52 @@ static unsigned ht_lookup_slave_capability(device_t dev)
return pos;
}
-static void ht_collapse_previous_enumeration(unsigned bus)
+static uint8_t ht_lookup_slave_capability(device_t dev)
+{
+ return ht_lookup_capability(dev, 0); // Slave/Primary Interface Block Format
+}
+
+static uint8_t ht_lookup_host_capability(device_t dev)
+{
+ return ht_lookup_capability(dev, 1); // Host/Secondary Interface Block Format
+}
+
+static void ht_collapse_previous_enumeration(uint8_t bus)
{
device_t dev;
-
+ uint32_t id;
+
+ /* Check if is already collapsed */
+ dev = PCI_DEV(bus, 0, 0);
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if ( ! ( (id == 0xffffffff) || (id == 0x00000000) ||
+ (id == 0x0000ffff) || (id == 0xffff0000) ) ) {
+ return;
+ }
+
/* Spin through the devices and collapse any previous
* hypertransport enumeration.
*/
for(dev = PCI_DEV(bus, 1, 0); dev <= PCI_DEV(bus, 0x1f, 0x7); dev += PCI_DEV(0, 1, 0)) {
- unsigned id;
- unsigned pos, flags;
+ uint32_t id;
+ uint8_t pos;
+ uint16_t flags;
id = pci_read_config32(dev, PCI_VENDOR_ID);
- if ( (id == 0xffffffff) || (id == 0x00000000) ||
- (id == 0x0000ffff) || (id == 0xffff0000))
- {
+ if ((id == 0xffffffff) || (id == 0x00000000) ||
+ (id == 0x0000ffff) || (id == 0xffff0000)) {
continue;
}
-
+#if 0
+#if CK804_DEVN_BASE==0
+ //CK804 workaround:
+	// the CK804 does not tolerate UnitID changes
+ if(id == 0x005e10de) {
+ break;
+ }
+#endif
+#endif
+
pos = ht_lookup_slave_capability(dev);
if (!pos) {
continue;
@@ -71,11 +116,11 @@ static void ht_collapse_previous_enumeration(unsigned bus)
}
}
-static unsigned ht_read_freq_cap(device_t dev, unsigned pos)
+static uint16_t ht_read_freq_cap(device_t dev, uint8_t pos)
{
/* Handle bugs in valid hypertransport frequency reporting */
- unsigned freq_cap;
- unsigned id;
+ uint16_t freq_cap;
+ uint32_t id;
freq_cap = pci_read_config16(dev, pos);
freq_cap &= ~(1 << HT_FREQ_VENDOR); /* Ignore Vendor HT frequencies */
@@ -85,60 +130,60 @@ static unsigned ht_read_freq_cap(device_t dev, unsigned pos)
/* AMD 8131 Errata 48 */
if (id == (PCI_VENDOR_ID_AMD | (PCI_DEVICE_ID_AMD_8131_PCIX << 16))) {
freq_cap &= ~(1 << HT_FREQ_800Mhz);
- }
+ return freq_cap;
+ }
/* AMD 8151 Errata 23 */
if (id == (PCI_VENDOR_ID_AMD | (PCI_DEVICE_ID_AMD_8151_SYSCTRL << 16))) {
freq_cap &= ~(1 << HT_FREQ_800Mhz);
- }
-
+ return freq_cap;
+ }
+
/* AMD K8 Unsupported 1Ghz? */
if (id == (PCI_VENDOR_ID_AMD | (0x1100 << 16))) {
- /* Supported starting with E0 */
- device_t dev_2 = PCI_DEV(0,0x18,2);
- if(pci_read_config32(dev_2,0x9c) < 0x20f00) {
- freq_cap &= ~(1 << HT_FREQ_1000Mhz);
- }
+ #if K8_HT_FREQ_1G_SUPPORT == 1
+ if (is_cpu_pre_e0()) // CK804 support 1G?
+ #endif
+ freq_cap &= ~(1 << HT_FREQ_1000Mhz);
}
return freq_cap;
}
-
#define LINK_OFFS(CTRL, WIDTH,FREQ,FREQ_CAP) \
- (((CTRL & 0xff) << 24) | ((WIDTH & 0xff) << 16) | ((FREQ & 0xff) << 8) | (FREQ_CAP & 0xFF))
+ (((CTRL & 0xff) << 24) | ((WIDTH & 0xff) << 16) | ((FREQ & 0xff) << 8) | (FREQ_CAP & 0xFF))
-#define LINK_CTRL(OFFS) ((OFFS >> 24) & 0xFF)
+#define LINK_CTRL(OFFS) ((OFFS >> 24) & 0xFF)
#define LINK_WIDTH(OFFS) ((OFFS >> 16) & 0xFF)
-#define LINK_FREQ(OFFS) ((OFFS >> 8) & 0xFF)
+#define LINK_FREQ(OFFS) ((OFFS >> 8) & 0xFF)
#define LINK_FREQ_CAP(OFFS) ((OFFS) & 0xFF)
#define PCI_HT_HOST_OFFS LINK_OFFS( \
- PCI_HT_CAP_HOST_CTRL, \
+ PCI_HT_CAP_HOST_CTRL, \
PCI_HT_CAP_HOST_WIDTH, \
PCI_HT_CAP_HOST_FREQ, \
PCI_HT_CAP_HOST_FREQ_CAP)
#define PCI_HT_SLAVE0_OFFS LINK_OFFS( \
- PCI_HT_CAP_SLAVE_CTRL0, \
+ PCI_HT_CAP_SLAVE_CTRL0, \
PCI_HT_CAP_SLAVE_WIDTH0, \
PCI_HT_CAP_SLAVE_FREQ0, \
PCI_HT_CAP_SLAVE_FREQ_CAP0)
#define PCI_HT_SLAVE1_OFFS LINK_OFFS( \
- PCI_HT_CAP_SLAVE_CTRL1, \
+ PCI_HT_CAP_SLAVE_CTRL1, \
PCI_HT_CAP_SLAVE_WIDTH1, \
PCI_HT_CAP_SLAVE_FREQ1, \
PCI_HT_CAP_SLAVE_FREQ_CAP1)
static int ht_optimize_link(
- device_t dev1, unsigned pos1, unsigned offs1,
- device_t dev2, unsigned pos2, unsigned offs2)
+ device_t dev1, uint8_t pos1, unsigned offs1,
+ device_t dev2, uint8_t pos2, unsigned offs2)
{
static const uint8_t link_width_to_pow2[]= { 3, 4, 0, 5, 1, 2, 0, 0 };
static const uint8_t pow2_to_link_width[] = { 0x7, 4, 5, 0, 1, 3 };
- unsigned freq_cap1, freq_cap2;
- unsigned width_cap1, width_cap2, width, old_width, ln_width1, ln_width2;
- unsigned freq, old_freq;
+ uint16_t freq_cap1, freq_cap2;
+ uint8_t width_cap1, width_cap2, width, old_width, ln_width1, ln_width2;
+ uint8_t freq, old_freq;
int needs_reset;
/* Set link width and frequency */
@@ -154,10 +199,8 @@ static int ht_optimize_link(
/* See if I am changing the link frequency */
old_freq = pci_read_config8(dev1, pos1 + LINK_FREQ(offs1));
- old_freq &= 0x0f;
needs_reset |= old_freq != freq;
old_freq = pci_read_config8(dev2, pos2 + LINK_FREQ(offs2));
- old_freq &= 0x0f;
needs_reset |= old_freq != freq;
/* Set the Calculated link frequency */
@@ -202,24 +245,139 @@ static int ht_optimize_link(
return needs_reset;
}
+#if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
+static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus);
+static int scan_pci_bus( unsigned bus)
+{
+ /*
+	 At this point we can already access PCI_DEV(bus, 0, 0) through PCI_DEV(bus, 0x1f, 0x7),
+	 so we scan these devices to find out which of them are bridges.
+	 For a PCI bridge we set the bus numbers in the bridge and continue scanning.
+	 For an HT bridge we set the bus numbers, run ht_setup_chainx, and then scan_pci_bus.
+ */
+ unsigned int devfn;
+ unsigned new_bus;
+ unsigned max_bus;
+
+ new_bus = (bus & 0xff); // mask out the reset_needed
+
+ if(new_bus<0x40) {
+ max_bus = 0x3f;
+ } else if (new_bus<0x80) {
+ max_bus = 0x7f;
+ } else if (new_bus<0xc0) {
+ max_bus = 0xbf;
+ } else {
+ max_bus = 0xff;
+ }
+
+ new_bus = bus;
+
+#if 0
+#if CONFIG_USE_INIT == 1
+ printk_debug("bus_num=%02x\r\n", bus);
+#endif
+#endif
-static int ht_setup_chainx(device_t udev, unsigned upos, unsigned bus)
+ for (devfn = 0; devfn <= 0xff; devfn++) {
+ uint8_t hdr_type;
+ uint16_t class;
+ uint32_t buses;
+ device_t dev;
+ uint16_t cr;
+ dev = PCI_DEV((bus & 0xff), ((devfn>>3) & 0x1f), (devfn & 0x7));
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ class = pci_read_config16(dev, PCI_CLASS_DEVICE);
+
+#if 0
+#if CONFIG_USE_INIT == 1
+ if(hdr_type !=0xff ) {
+ printk_debug("dev=%02x fn=%02x hdr_type=%02x class=%04x\r\n",
+ (devfn>>3)& 0x1f, (devfn & 0x7), hdr_type, class);
+ }
+#endif
+#endif
+ switch(hdr_type & 0x7f) { /* header type */
+ case PCI_HEADER_TYPE_BRIDGE:
+ if (class != PCI_CLASS_BRIDGE_PCI) goto bad;
+ /* set the bus range dev */
+
+ /* Clear all status bits and turn off memory, I/O and master enables. */
+ cr = pci_read_config16(dev, PCI_COMMAND);
+ pci_write_config16(dev, PCI_COMMAND, 0x0000);
+ pci_write_config16(dev, PCI_STATUS, 0xffff);
+
+ buses = pci_read_config32(dev, PCI_PRIMARY_BUS);
+
+ buses &= 0xff000000;
+ new_bus++;
+ buses |= (((unsigned int) (bus & 0xff) << 0) |
+ ((unsigned int) (new_bus & 0xff) << 8) |
+ ((unsigned int) max_bus << 16));
+ pci_write_config32(dev, PCI_PRIMARY_BUS, buses);
+
+ {
+				/* Figure out whether dev is an HT bridge;
+				   if it is, ht_setup_chainx must be called first.
+ Not verified --- yhlu
+ */
+ uint8_t upos;
+ upos = ht_lookup_host_capability(dev); // one func one ht sub
+ if (upos) { // sub ht chain
+ uint8_t busn;
+ busn = (new_bus & 0xff);
+ /* Make certain the HT bus is not enumerated */
+ ht_collapse_previous_enumeration(busn);
+ /* scan the ht chain */
+ new_bus |= (ht_setup_chainx(dev,upos,busn)<<16); // store reset_needed to upword
+ }
+ }
+
+ new_bus = scan_pci_bus(new_bus);
+ /* set real max bus num in that */
+
+ buses = (buses & 0xff00ffff) |
+ ((unsigned int) (new_bus & 0xff) << 16);
+ pci_write_config32(dev, PCI_PRIMARY_BUS, buses);
+
+ pci_write_config16(dev, PCI_COMMAND, cr);
+
+ break;
+ default:
+ bad:
+ ;
+ }
+
+ /* if this is not a multi function device,
+ * or the device is not present don't waste
+ * time probing another function.
+ * Skip to next device.
+ */
+ if ( ((devfn & 0x07) == 0x00) && ((hdr_type & 0x80) != 0x80))
+ {
+ devfn += 0x07;
+ }
+ }
+
+ return new_bus;
+}
+#endif
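scan_pci_bus() above overloads its return value: the next free bus number rides in the low byte while any reset_needed flag coming back from ht_setup_chainx is shifted into the upper word (see the '<<16' store here and the '>>16' extraction in ht_setup_chains further down). A small sketch of that packing, with illustrative names:

/* Sketch: pack/unpack the bus number and reset flag carried in one unsigned. */
static unsigned pack_bus_result(unsigned bus, int reset_needed)
{
	return (bus & 0xff) | ((reset_needed & 0x1) << 16);
}

static int unpack_reset_needed(unsigned packed)
{
	return (packed >> 16) & 0x1;
}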
+static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
{
- unsigned next_unitid, last_unitid;
- int reset_needed;
+ uint8_t next_unitid, last_unitid;
unsigned uoffs;
+ int reset_needed=0;
- reset_needed = 0;
uoffs = PCI_HT_HOST_OFFS;
next_unitid = 1;
do {
- unsigned id;
- unsigned pos;
- unsigned flags, count, offs, ctrl;
-
- last_unitid = next_unitid;
-
+ uint32_t id;
+ uint8_t pos;
+ uint16_t flags, ctrl;
+ uint8_t count;
+ unsigned offs;
+
/* Wait until the link initialization is complete */
do {
ctrl = pci_read_config16(udev, upos + LINK_CTRL(uoffs));
@@ -232,11 +390,14 @@ static int ht_setup_chainx(device_t udev, unsigned upos, unsigned bus)
break;
}
} while((ctrl & (1 << 5)) == 0);
-
+
device_t dev = PCI_DEV(bus, 0, 0);
+ last_unitid = next_unitid;
+
id = pci_read_config32(dev, PCI_VENDOR_ID);
+
/* If the chain is enumerated quit */
- if ( (id == 0xffffffff) || (id == 0x00000000) ||
+ if ( (id == 0xffffffff) || (id == 0x00000000) ||
(id == 0x0000ffff) || (id == 0xffff0000))
{
break;
@@ -248,6 +409,13 @@ static int ht_setup_chainx(device_t udev, unsigned upos, unsigned bus)
break;
}
+#if CK804_DEVN_BASE==0
+ //CK804 workaround:
+	// the CK804 does not tolerate UnitID changes
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if(id != 0x005e10de) {
+#endif
+
/* Update the Unitid of the current device */
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
flags &= ~0x1f; /* mask out the base Unit ID */
@@ -256,48 +424,60 @@ static int ht_setup_chainx(device_t udev, unsigned upos, unsigned bus)
/* Note the change in device number */
dev = PCI_DEV(bus, next_unitid, 0);
+#if CK804_DEVN_BASE==0
+ }
+ else {
+ dev = PCI_DEV(bus, 0, 0);
+ }
+#endif
- /* Compute the number of unitids consumed */
- count = (flags >> 5) & 0x1f;
- next_unitid += count;
+ /* Compute the number of unitids consumed */
+ count = (flags >> 5) & 0x1f;
+ next_unitid += count;
/* Find which side of the ht link we are on,
* by reading which direction our last write to PCI_CAP_FLAGS
* came from.
*/
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
- offs = ((flags >> 10) & 1) ? PCI_HT_SLAVE1_OFFS : PCI_HT_SLAVE0_OFFS;
+ offs = ((flags>>10) & 1) ? PCI_HT_SLAVE1_OFFS : PCI_HT_SLAVE0_OFFS;
+
+ /* Setup the Hypertransport link */
+ reset_needed |= ht_optimize_link(udev, upos, uoffs, dev, pos, offs);
- /* Setup the Hypertransport link */
- reset_needed |= ht_optimize_link(udev, upos, uoffs, dev, pos, offs);
+#if CK804_DEVN_BASE==0
+ if(id == 0x005e10de) {
+ break;
+ }
+#endif
- /* Remember the location of the last device */
+	/* Remember the location of the last device */
udev = dev;
upos = pos;
uoffs = ( offs != PCI_HT_SLAVE0_OFFS ) ? PCI_HT_SLAVE0_OFFS : PCI_HT_SLAVE1_OFFS;
} while((last_unitid != next_unitid) && (next_unitid <= 0x1f));
+
return reset_needed;
}
static int ht_setup_chain(device_t udev, unsigned upos)
{
- /* Assumption the HT chain that is bus 0 has the HT I/O Hub on it.
- * On most boards this just happens. If a cpu has multiple
- * non Coherent links the appropriate bus registers for the
- * links needs to be programed to point at bus 0.
- */
+ /* Assumption the HT chain that is bus 0 has the HT I/O Hub on it.
+ * On most boards this just happens. If a cpu has multiple
+ * non Coherent links the appropriate bus registers for the
+ * links needs to be programed to point at bus 0.
+ */
- /* Make certain the HT bus is not enumerated */
- ht_collapse_previous_enumeration(0);
+ /* Make certain the HT bus is not enumerated */
+ ht_collapse_previous_enumeration(0);
- return ht_setup_chainx(udev, upos, 0);
+ return ht_setup_chainx(udev, upos, 0);
}
-
-static int optimize_link_read_pointer(unsigned node, unsigned linkn, unsigned linkt, unsigned val)
+static int optimize_link_read_pointer(uint8_t node, uint8_t linkn, uint8_t linkt, uint8_t val)
{
- unsigned dword, dword_old;
- unsigned link_type;
+ uint32_t dword, dword_old;
+ uint8_t link_type;
/* This works on an Athlon64 because unimplemented links return 0 */
dword = pci_read_config32(PCI_DEV(0,0x18+node,0), 0x98 + (linkn * 0x20));
@@ -318,18 +498,18 @@ static int optimize_link_read_pointer(unsigned node, unsigned linkn, unsigned li
return 0;
}
-static int optimize_link_in_coherent(unsigned ht_c_num)
+static int optimize_link_in_coherent(uint8_t ht_c_num)
{
int reset_needed;
- unsigned i;
+ uint8_t i;
reset_needed = 0;
for (i = 0; i < ht_c_num; i++) {
- unsigned reg;
- unsigned nodeid, linkn;
- unsigned busn;
- unsigned val;
+ uint32_t reg;
+ uint8_t nodeid, linkn;
+ uint8_t busn;
+ uint8_t val;
reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4);
@@ -340,11 +520,9 @@ static int optimize_link_in_coherent(unsigned ht_c_num)
reg = pci_read_config32( PCI_DEV(busn, 1, 0), PCI_VENDOR_ID);
if ( (reg & 0xffff) == PCI_VENDOR_ID_AMD) {
val = 0x25;
- }
- else if ( (reg & 0xffff) == PCI_VENDOR_ID_NVIDIA) {
+ } else if ( (reg & 0xffff) == PCI_VENDOR_ID_NVIDIA ) {
val = 0x25;//???
- }
- else {
+ } else {
continue;
}
@@ -355,7 +533,7 @@ static int optimize_link_in_coherent(unsigned ht_c_num)
return reset_needed;
}
-static int ht_setup_chains(unsigned ht_c_num)
+static int ht_setup_chains(uint8_t ht_c_num)
{
/* Assumption the HT chain that is bus 0 has the HT I/O Hub on it.
* On most boards this just happens. If a cpu has multiple
@@ -363,18 +541,21 @@ static int ht_setup_chains(unsigned ht_c_num)
* links needs to be programed to point at bus 0.
*/
int reset_needed;
- unsigned upos;
- device_t udev;
- int i;
+ uint8_t upos;
+ device_t udev;
+ uint8_t i;
reset_needed = 0;
for (i = 0; i < ht_c_num; i++) {
- unsigned reg;
- unsigned devpos;
+ uint32_t reg;
+ uint8_t devpos;
unsigned regpos;
- unsigned dword;
- unsigned busn;
+ uint32_t dword;
+ uint8_t busn;
+ #if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
+ unsigned bus;
+ #endif
reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4);
@@ -388,13 +569,19 @@ static int ht_setup_chains(unsigned ht_c_num)
dword |= (reg & 0xffff0000)>>8;
pci_write_config32( PCI_DEV(0, devpos,0), regpos , dword);
- /* Make certain the HT bus is not enumerated */
- ht_collapse_previous_enumeration(busn);
+ /* Make certain the HT bus is not enumerated */
+ ht_collapse_previous_enumeration(busn);
upos = ((reg & 0xf00)>>8) * 0x20 + 0x80;
- udev = PCI_DEV(0, devpos, 0);
+ udev = PCI_DEV(0, devpos, 0);
- reset_needed |= ht_setup_chainx(udev, upos, busn);
+ reset_needed |= ht_setup_chainx(udev,upos,busn);
+
+ #if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
+	/* You can not use this in romcc: it relies on recursive function calls, which romcc cannot handle */
+ bus = busn; // we need 32 bit
+ reset_needed |= (scan_pci_bus(bus)>>16); // take out reset_needed that stored in upword
+ #endif
}
reset_needed |= optimize_link_in_coherent(ht_c_num);
@@ -402,81 +589,126 @@ static int ht_setup_chains(unsigned ht_c_num)
return reset_needed;
}
+#ifndef K8_ALLOCATE_IO_RANGE
+ #define K8_ALLOCATE_IO_RANGE 0
+#endif
+
static int ht_setup_chains_x(void)
-{
- unsigned nodeid;
- unsigned reg;
- unsigned tempreg;
- unsigned next_busn;
- unsigned ht_c_num;
- unsigned nodes;
+{
+ uint8_t nodeid;
+ uint32_t reg;
+ uint32_t tempreg;
+ uint8_t next_busn;
+ uint8_t ht_c_num;
+ uint8_t nodes;
+#if K8_ALLOCATE_IO_RANGE == 1
+ unsigned next_io_base;
+#endif
- /* read PCI_DEV(0,0x18,0) 0x64 bit [8:9] to find out SbLink m */
- reg = pci_read_config32(PCI_DEV(0, 0x18, 0), 0x64);
- /* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn=5+1 */
+ /* read PCI_DEV(0,0x18,0) 0x64 bit [8:9] to find out SbLink m */
+ reg = pci_read_config32(PCI_DEV(0, 0x18, 0), 0x64);
+ /* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn=0x3f+1 */
print_linkn_in("SBLink=", ((reg>>8) & 3) );
- tempreg = 3 | ( 0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (5<<24);
- pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0, tempreg);
+ tempreg = 3 | ( 0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (0x3f<<24);
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0, tempreg);
+
+	next_busn=0x3f+1; /* bus 0 is used for the HT chain with the SB; keep the SB on bus 0 in the auto stage */
+
+#if K8_ALLOCATE_IO_RANGE == 1
+ /* io range allocation */
+ tempreg = 0 | (((reg>>8) & 0x3) << 4 )| (0x3<<12); //limit
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4, tempreg);
+ tempreg = 3 | ( 3<<4) | (0<<12); //base
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0, tempreg);
+ next_io_base = 0x3+0x1;
+#endif
- next_busn=5+1; /* 0 will be used ht chain with SB we need to keep SB in bus0 in auto stage*/
/* clean others */
- for(ht_c_num=1;ht_c_num<4; ht_c_num++) {
- pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, 0);
- }
+ for(ht_c_num=1;ht_c_num<4; ht_c_num++) {
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, 0);
+ /* io range allocation */
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xc4 + ht_c_num * 8, 0);
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xc0 + ht_c_num * 8, 0);
+ }
nodes = ((pci_read_config32(PCI_DEV(0, 0x18, 0), 0x60)>>4) & 7) + 1;
- for(nodeid=0; nodeid<nodes; nodeid++) {
- device_t dev;
- unsigned linkn;
- dev = PCI_DEV(0, 0x18+nodeid,0);
- for(linkn = 0; linkn<3; linkn++) {
- unsigned regpos;
- regpos = 0x98 + 0x20 * linkn;
- reg = pci_read_config32(dev, regpos);
-		if ((reg & 0x17) != 7) continue; /* it is not non-coherent or not connected */
+ for(nodeid=0; nodeid<nodes; nodeid++) {
+ device_t dev;
+ uint8_t linkn;
+ dev = PCI_DEV(0, 0x18+nodeid,0);
+ for(linkn = 0; linkn<3; linkn++) {
+ unsigned regpos;
+ regpos = 0x98 + 0x20 * linkn;
+ reg = pci_read_config32(dev, regpos);
+			if ((reg & 0x17) != 7) continue; /* it is not non-coherent or not connected */
print_linkn_in("NC node|link=", ((nodeid & 0xf)<<4)|(linkn & 0xf));
- tempreg = 3 | (nodeid <<4) | (linkn<<8);
- /*compare (temp & 0xffff), with (PCI(0, 0x18, 1) 0xe0 to 0xec & 0xfffff) */
- for(ht_c_num=0;ht_c_num<4; ht_c_num++) {
- reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
- if(((reg & 0xffff) == (tempreg & 0xffff)) || ((reg & 0xffff) == 0x0000)) { /*we got it*/
- break;
- }
- }
-			if(ht_c_num == 4) break; /* used up, only 4 non-coherent allowed */
- /*update to 0xe0...*/
+ tempreg = 3 | (nodeid <<4) | (linkn<<8);
+ /*compare (temp & 0xffff), with (PCI(0, 0x18, 1) 0xe0 to 0xec & 0xfffff) */
+ for(ht_c_num=0;ht_c_num<4; ht_c_num++) {
+ reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
+ if(((reg & 0xffff) == (tempreg & 0xffff)) || ((reg & 0xffff) == 0x0000)) { /*we got it*/
+ break;
+ }
+ }
+			if(ht_c_num == 4) break; /* used up, only 4 non-coherent allowed */
+ /*update to 0xe0...*/
if((reg & 0xf) == 3) continue; /*SbLink so don't touch it */
print_linkn_in("\tbusn=", next_busn);
- tempreg |= (next_busn<<16)|((next_busn+5)<<24);
- pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, tempreg);
- next_busn+=5+1;
- }
- }
- /*update 0xe0, 0xe4, 0xe8, 0xec from PCI_DEV(0, 0x18,1) to PCI_DEV(0, 0x19,1) to PCI_DEV(0, 0x1f,1);*/
-
- for(nodeid = 1; nodeid<nodes; nodeid++) {
- int i;
- device_t dev;
- dev = PCI_DEV(0, 0x18+nodeid,1);
- for(i = 0; i< 4; i++) {
- unsigned regpos;
- regpos = 0xe0 + i * 4;
- reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
- pci_write_config32(dev, regpos, reg);
-
- }
- }
+ tempreg |= (next_busn<<16)|((next_busn+0x3f)<<24);
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, tempreg);
+ next_busn+=0x3f+1;
+
+#if K8_ALLOCATE_IO_RANGE == 1
+ /* io range allocation */
+ tempreg = nodeid | (linkn<<4) | ((next_io_base+0x3)<<12); //limit
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4 + ht_c_num * 8, tempreg);
+ tempreg = 3 | ( 3<<4) | (next_io_base<<12); //base
+ pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0 + ht_c_num * 8, tempreg);
+ next_io_base += 0x3+0x1;
+#endif
+
+ }
+ }
+ /*update 0xe0, 0xe4, 0xe8, 0xec from PCI_DEV(0, 0x18,1) to PCI_DEV(0, 0x19,1) to PCI_DEV(0, 0x1f,1);*/
+
+ for(nodeid = 1; nodeid<nodes; nodeid++) {
+ int i;
+ device_t dev;
+ dev = PCI_DEV(0, 0x18+nodeid,1);
+ for(i = 0; i< 4; i++) {
+ unsigned regpos;
+ regpos = 0xe0 + i * 4;
+ reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
+ pci_write_config32(dev, regpos, reg);
+ }
+
+#if K8_ALLOCATE_IO_RANGE == 1
+ /* io range allocation */
+ for(i = 0; i< 4; i++) {
+ unsigned regpos;
+ regpos = 0xc4 + i * 8;
+ reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
+ pci_write_config32(dev, regpos, reg);
+ }
+ for(i = 0; i< 4; i++) {
+ unsigned regpos;
+ regpos = 0xc0 + i * 8;
+ reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
+ pci_write_config32(dev, regpos, reg);
+ }
+#endif
+ }
/* recount ht_c_num*/
- unsigned i=0;
- for(ht_c_num=0;ht_c_num<4; ht_c_num++) {
+ uint8_t i=0;
+ for(ht_c_num=0;ht_c_num<4; ht_c_num++) {
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
- if(((reg & 0xf) != 0x0)) {
+ if(((reg & 0xf) != 0x0)) {
i++;
}
- }
+ }
- return ht_setup_chains(i);
+ return ht_setup_chains(i);
}
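ht_setup_chains_x() programs the K8 configuration-map registers at function-1 offsets 0xe0-0xec; judging from every tempreg built above, each entry carries read/write enables in bits [1:0], the destination node in [7:4], the link in [11:8], the secondary bus base in [23:16] and the bus limit in [31:24]. A hedged encoder for one entry (helper name illustrative):

/* Sketch: build one non-coherent HT chain entry for K8 F1 regs 0xe0..0xec,
 * mirroring the 'tempreg = 3 | (nodeid<<4) | (linkn<<8) | ...' pattern above. */
static uint32_t ht_chain_entry(uint8_t nodeid, uint8_t linkn,
	uint8_t bus_base, uint8_t bus_limit)
{
	return 3                                 /* bit0 read enable, bit1 write enable */
		| ((uint32_t)(nodeid & 0xf) << 4)
		| ((uint32_t)(linkn & 0xf) << 8)
		| ((uint32_t)bus_base << 16)
		| ((uint32_t)bus_limit << 24);
}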
diff --git a/src/northbridge/amd/amdk8/northbridge.c b/src/northbridge/amd/amdk8/northbridge.c
index 5debc57b2b..e45aff8242 100644
--- a/src/northbridge/amd/amdk8/northbridge.c
+++ b/src/northbridge/amd/amdk8/northbridge.c
@@ -17,9 +17,9 @@
#include <cpu/cpu.h>
#include <cpu/x86/lapic.h>
-#include <cpu/amd/dualcore.h>
#if CONFIG_LOGICAL_CPUS==1
+#include <cpu/amd/dualcore.h>
#include <pc80/mc146818rtc.h>
#endif
@@ -27,7 +27,10 @@
#include "root_complex/chip.h"
#include "northbridge.h"
#include "amdk8.h"
-#include "cpu_rev.c"
+
+#if K8_E0_MEM_HOLE_SIZEK != 0
+#include "./cpu_rev.c"
+#endif
#define FX_DEVS 8
static device_t __f0_dev[FX_DEVS];
@@ -637,41 +640,6 @@ static uint32_t find_pci_tolm(struct bus *bus)
return tolm;
}
-static uint32_t hoist_memory(unsigned long mmio_basek, int i)
-{
- int ii;
- uint32_t carry_over;
- device_t dev;
- uint32_t base, limit;
- uint32_t basek;
- uint32_t hoist;
-
- carry_over = (4*1024*1024) - mmio_basek;
- for(ii=7;ii>i;ii--) {
-
- base = f1_read_config32(0x40 + (ii << 3));
- limit = f1_read_config32(0x44 + (ii << 3));
- if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
- continue;
- }
- f1_write_config32(0x44 + (ii << 3),limit + (carry_over << 2));
- f1_write_config32(0x40 + (ii << 3),base + (carry_over << 2));
- }
- limit = f1_read_config32(0x44 + (i << 3));
- f1_write_config32(0x44 + (i << 3),limit + (carry_over << 2));
- dev = __f1_dev[i];
- base = pci_read_config32(dev, 0x40 + (i << 3));
- basek = (pci_read_config32(dev, 0x40 + (i << 3)) & 0xffff0000) >> 2;
- hoist = /* hole start address */
- ((mmio_basek << 10) & 0xff000000) +
- /* hole address to memory controller address */
- (((basek + carry_over) >> 6) & 0x0000ff00) +
- /* enable */
- 1;
- pci_write_config32(dev, 0xf0, hoist);
- return carry_over;
-}
-
static void pci_domain_set_resources(device_t dev)
{
unsigned long mmio_basek;
@@ -679,22 +647,42 @@ static void pci_domain_set_resources(device_t dev)
int i, idx;
pci_tolm = find_pci_tolm(&dev->link[0]);
-#if 1
- /* work around for kernel dual core NUMA bug */
- printk_debug("find_pci_tolm = %x\n",pci_tolm);
- if(pci_tolm > 0xf8000000) pci_tolm = 0xf8000000;
-#endif
#warning "FIXME handle interleaved nodes"
mmio_basek = pci_tolm >> 10;
/* Round mmio_basek to something the processor can support */
mmio_basek &= ~((1 << 6) -1);
+#if 1
+#warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
+	/* Round the mmio hole to 64M */
+ mmio_basek &= ~((64*1024) - 1);
+#endif
+
+#if K8_E0_MEM_HOLE_SIZEK != 0
+ if (!is_cpu_pre_e0())
+ for (i = 0; i < 8; i++) {
+ uint32_t base;
+ base = f1_read_config32(0x40 + (i << 3));
+ if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
+ continue;
+ }
+
+ base = pci_read_config32(__f1_dev[i], 0xf0);
+ if((base & 1)==0) continue;
+ base &= 0xff<<24;
+ base >>= 10;
+ if (mmio_basek > base) {
+ mmio_basek = base;
+ }
+ break; // only one hole
+ }
+#endif
+
idx = 10;
for(i = 0; i < 8; i++) {
uint32_t base, limit;
unsigned basek, limitk, sizek;
-
base = f1_read_config32(0x40 + (i << 3));
limit = f1_read_config32(0x44 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
@@ -720,9 +708,6 @@ static void pci_domain_set_resources(device_t dev)
pre_sizek = mmio_basek - basek;
ram_resource(dev, idx++, basek, pre_sizek);
sizek -= pre_sizek;
- if(! is_cpu_pre_e0() ) {
- sizek += hoist_memory(mmio_basek,i);
- }
basek = mmio_basek;
}
if ((basek + sizek) <= 4*1024*1024) {
@@ -782,20 +767,54 @@ static struct device_operations pci_domain_ops = {
.ops_pci_bus = &pci_cf8_conf1,
};
+#define APIC_ID_OFFSET 0x10
+
static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
{
struct bus *cpu_bus;
device_t dev_mc;
- unsigned max_siblings;
+ int bsp_apic_id;
+ int apic_id_offset;
int i,j;
+ unsigned nb_cfg_54;
+ int enable_apic_ext_id;
+ unsigned siblings;
+#if CONFIG_LOGICAL_CPUS == 1
+ int e0_later_single_core;
+ int disable_siblings;
+#endif
+
+ nb_cfg_54 = 0;
+ enable_apic_ext_id = 0;
+ siblings = 0;
+
+ /* Find the bootstrap processors apicid */
+ bsp_apic_id = lapicid();
+
+ /* See if I will enable extended ids' */
+ apic_id_offset = bsp_apic_id;
+
+#if CONFIG_LOGICAL_CPUS == 1
+ disable_siblings = !CONFIG_LOGICAL_CPUS;
+ get_option(&disable_siblings, "dual_core");
+	// For pre-E0 CPUs nb_cfg_54 cannot be set (even if you set it, it still reads back as 0).
+	// How can the BSP read nb_cfg_54 for every node, and tell D0 from E0 single-core parts?
+
+ nb_cfg_54 = read_nb_cfg_54();
+#endif
dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
if (!dev_mc) {
die("0:18.0 not found?");
}
-
- /* For now assume all cpus have the same number of siblings */
- max_siblings = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (pci_read_config32(dev_mc, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
+ {
+ enable_apic_ext_id = 1;
+ if (apic_id_offset == 0) {
+ /* bsp apic id is not changed */
+ apic_id_offset = APIC_ID_OFFSET;
+ }
+ }
/* Find which cpus are present */
cpu_bus = &dev->link[0];
@@ -815,36 +834,82 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
PCI_DEVFN(0x18 + i, j));
}
}
-
- /* Build the cpu device path */
- cpu_path.type = DEVICE_PATH_APIC;
- cpu_path.u.apic.apic_id = i*max_siblings;
- /* See if I can find the cpu */
- cpu = find_dev_path(cpu_bus, &cpu_path);
+#if CONFIG_LOGICAL_CPUS == 1
+ e0_later_single_core = 0;
+ if ((!disable_siblings) && dev && dev->enabled) {
+ j = (pci_read_config32(dev, 0xe8) >> 12) & 3; // dev is func 3
+ printk_debug(" %s siblings=%d\r\n", dev_path(dev), j);
+
+ if(nb_cfg_54) {
+			// For E0 single-core parts, if nb_cfg_54 is set the APIC IDs will be 0, 2, 4, ...
+			// ----> single-core E0 and dual-core E0 CPUs can then be mixed in any order.
+			// That is the typical case.
+
+ if(j == 0 ){
+ e0_later_single_core = is_e0_later_in_bsp(i); // single core
+ } else {
+ e0_later_single_core = 0;
+ }
+ if(e0_later_single_core) {
+ printk_debug("\tFound e0 single core\r\n");
+ j=1;
+ }
+
+ if(siblings > j ) {
+				// actually we can't get here, because nb_cfg_54 cannot be set on D0
+				// and, worse, is_e0_later_in_bsp() cannot tell whether it is D0 or E0
- /* Enable the cpu if I have the processor */
- if (dev && dev->enabled) {
- if (!cpu) {
- cpu = alloc_dev(cpu_bus, &cpu_path);
- }
- if (cpu) {
- cpu->enabled = 1;
- }
+ die("When NB_CFG_54 is set, if you want to mix e0 (single core and dual core) and single core(pre e0) CPUs, you need to put all the single core (pre e0) CPUs before all the (e0 single or dual core) CPUs\r\n");
+ }
+ else {
+ siblings = j;
+ }
+ } else {
+ siblings = j;
+ }
}
+#endif
+#if CONFIG_LOGICAL_CPUS==1
+ for (j = 0; j <= (e0_later_single_core?0:siblings); j++ ) {
+#else
+ for (j = 0; j <= siblings; j++ ) {
+#endif
+ /* Build the cpu device path */
+ cpu_path.type = DEVICE_PATH_APIC;
+ cpu_path.u.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
- /* Disable the cpu if I don't have the processor */
- if (cpu && (!dev || !dev->enabled)) {
- cpu->enabled = 0;
- }
+ /* See if I can find the cpu */
+ cpu = find_dev_path(cpu_bus, &cpu_path);
- /* Report what I have done */
- if (cpu) {
- printk_debug("CPU: %s %s\n",
- dev_path(cpu), cpu->enabled?"enabled":"disabled");
- }
+ /* Enable the cpu if I have the processor */
+ if (dev && dev->enabled) {
+ if (!cpu) {
+ cpu = alloc_dev(cpu_bus, &cpu_path);
+ }
+ if (cpu) {
+ cpu->enabled = 1;
+ }
+ }
+
+ /* Disable the cpu if I don't have the processor */
+ if (cpu && (!dev || !dev->enabled)) {
+ cpu->enabled = 0;
+ }
+
+ /* Report what I have done */
+ if (cpu) {
+ if(enable_apic_ext_id) {
+ if(cpu->path.u.apic.apic_id<apic_id_offset) { //all add offset except bsp core0
+ if( (cpu->path.u.apic.apic_id > siblings) || (bsp_apic_id!=0) )
+ cpu->path.u.apic.apic_id += apic_id_offset;
+ }
+ }
+ printk_debug("CPU: %s %s\n",
+ dev_path(cpu), cpu->enabled?"enabled":"disabled");
+ }
+ } //j
}
-
return max;
}
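The APIC ID assignment in cpu_bus_scan() above depends on NB_CFG bit 54: when it is set, the cores of node i get consecutive IDs (i*(siblings+1)+j); when it is clear, the second core of every node sits 8 above its core 0 (i + j*8), before the optional extended-ID offset is applied. A hedged restatement:

/* Sketch: APIC ID layout used in cpu_bus_scan(), before the ext-ID offset.
 * node = physical node index (i), core = core index on that node (j). */
static unsigned k8_apic_id(unsigned node, unsigned core, unsigned siblings, int nb_cfg_54)
{
	if (nb_cfg_54)
		return node * (siblings + 1) + core;  /* IDs 0,1, 2,3, ... per node */
	return node + core * 8;                       /* second-core IDs start at 8 */
}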
diff --git a/src/northbridge/amd/amdk8/raminit.c b/src/northbridge/amd/amdk8/raminit.c
index d211f4bcc1..5d9c320637 100644
--- a/src/northbridge/amd/amdk8/raminit.c
+++ b/src/northbridge/amd/amdk8/raminit.c
@@ -585,16 +585,6 @@ static void hw_enable_ecc(const struct mem_controller *ctrl)
}
-static void e_step_cpu(const struct mem_controller *ctrl)
-{
- uint32_t dcl,data32;
-
- /* set bit 29 (upper cs map) of function 2 offset 0x90 */
- dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
- dcl |= DCL_UpperCSMap;
- pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
-}
-
static int is_dual_channel(const struct mem_controller *ctrl)
{
uint32_t dcl;
@@ -724,14 +714,28 @@ hw_err:
return sz;
}
+static const unsigned cs_map_aa[15] = {
+ /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
+ 0, 1, 3, 6, 0,
+ 0, 2, 4, 7, 9,
+ 0, 0, 5, 8,10,
+};
+
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
{
- uint32_t base0, base1;
+ uint32_t base0, base1, map;
uint32_t dch;
if (sz.side1 != sz.side2) {
sz.side2 = 0;
}
+ map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
+ map &= ~(0xf << (index * 4));
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map &= ~(0xf << ( (index + 2) * 4));
+ }
+#endif
/* For each base register.
* Place the dimm size in 32 MB quantities in the bits 31 - 21.
@@ -743,6 +747,22 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
/* Make certain side1 of the dimm is at least 32MB */
if (sz.side1 >= (25 +3)) {
+ if(is_cpu_pre_d0()) {
+ map |= (sz.side1 - (25 + 3)) << (index *4);
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
+ }
+#endif
+ }
+ else {
+ map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
+ }
+#endif
+ }
base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
}
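cs_map_aa[] above is indexed as (rows - 12) * 5 + (col - 8), i.e. a 3-by-5 table of DRAM bank-address-map codes for D0-and-later parts, with 0 marking unsupported row/column combinations. An illustrative bounds-checked lookup (the wrapper name is not from the source):

/* Sketch: look up the D0+ bank-address-map code for one chip select.
 * Valid range per the table above: rows 12..14, columns 8..12. */
static unsigned cs_map_aa_lookup(unsigned rows, unsigned cols)
{
	if ((rows < 12) || (rows > 14) || (cols < 8) || (cols > 12))
		return 0;  /* 0 also flags unsupported geometries inside the table */
	return cs_map_aa[(rows - 12) * 5 + (cols - 8)];
}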
@@ -771,6 +791,8 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
#endif
+ pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
+
/* Enable the memory clocks for this DIMM */
if (base0) {
dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
@@ -784,52 +806,6 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
}
-
-static void set_dimm_map(const struct mem_controller *ctrl,
- struct dimm_size sz, unsigned index)
-{
- static const unsigned cs_map_aa[15] = {
- /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
- 0, 1, 3, 6, 0,
- 0, 2, 4, 7, 9,
- 0, 0, 5, 8,10,
- };
- uint32_t map;
- int row,col;
-
- map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
- map &= ~(0xf << (index * 4));
-
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map &= ~(0xf << ( (index + 2) * 4));
- }
-#endif
-
- if (is_cpu_pre_d0()) {
- map |= (sz.side1 - (25 + 3)) << (index *4);
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
- }
-#endif
- } else {
- unsigned val;
- val = cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ];
- if(val == 0) {
- print_err("Invalid Column or Row count\r\n");
- val = 7;
- }
- map |= val << (index*4);
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map |= val << ( (index + 2) * 4);
- }
-#endif
- }
- pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
-}
-
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
{
int i;
@@ -844,7 +820,6 @@ static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
return -1; /* Report SPD error */
}
set_dimm_size(ctrl, sz, i);
- set_dimm_map(ctrl, sz, i);
}
return dimm_mask;
}
@@ -890,13 +865,6 @@ static void set_top_mem(unsigned tom_k)
print_spew_hex32(tom_k);
print_spew(" KB\r\n");
-#if 0
- /* Report the amount of memory. */
- print_debug("RAM: 0x");
- print_debug_hex32(tom_k);
- print_debug(" KB\r\n");
-#endif
-
/* Now set top of memory */
msr_t msr;
msr.lo = (tom_k & 0x003fffff) << 10;
@@ -1003,7 +971,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
if(is_dual_channel(ctrl)) {
/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
- print_spew("8 4GB chip selects cannot be interleaved\r\n");
+// print_debug("8 4GB chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@@ -1013,7 +981,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
csbase_inc = csbase_low_d0[common_cs_mode];
if(is_dual_channel(ctrl)) {
if( (bits==3) && (common_cs_mode > 8)) {
- print_spew("8 cs_mode>8 chip selects cannot be interleaved\r\n");
+// print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@@ -1132,6 +1100,25 @@ unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
return end_k;
}
+#if K8_E0_MEM_HOLE_SIZEK != 0
+#define K8_E0_MEM_HOLE_LIMITK 4*1024*1024
+#define K8_E0_MEM_HOLE_BASEK (K8_E0_MEM_HOLE_LIMITK - K8_E0_MEM_HOLE_SIZEK )
+
+static void set_e0_mem_hole(const struct mem_controller *ctrl, unsigned base_k)
+{
+ /* Route the addresses to the controller node */
+ unsigned val;
+
+ val = pci_read_config32(ctrl->f1,0xf0);
+
+ val &= 0x00ff00fe;
+ val = (K8_E0_MEM_HOLE_BASEK << 10) | ((K8_E0_MEM_HOLE_SIZEK+base_k)>>(16-10)) | 1;
+
+ pci_write_config32(ctrl->f1, 0xf0, val);
+}
+
+#endif
+
static void order_dimms(const struct mem_controller *ctrl)
{
unsigned long tom_k, base_k;
@@ -1148,6 +1135,14 @@ static void order_dimms(const struct mem_controller *ctrl)
/* Compute the memory base address */
base_k = memory_end_k(ctrl, ctrl->node_id);
tom_k += base_k;
+#if K8_E0_MEM_HOLE_SIZEK != 0
+ if(!is_cpu_pre_e0()) {
+		/* See if this range covers the hole */
+ if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (tom_k > K8_E0_MEM_HOLE_BASEK)) {
+ tom_k += K8_E0_MEM_HOLE_SIZEK;
+ }
+ }
+#endif
route_dram_accesses(ctrl, base_k, tom_k);
set_top_mem(tom_k);
}
@@ -2150,11 +2145,12 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
struct spd_set_memclk_result result;
const struct mem_param *param;
long dimm_mask;
-
+#if 1
if (!controller_present(ctrl)) {
- print_debug("No memory controller present\r\n");
+// print_debug("No memory controller present\r\n");
return;
}
+#endif
hw_enable_ecc(ctrl);
activate_spd_rom(ctrl);
dimm_mask = spd_detect_dimms(ctrl);
@@ -2180,10 +2176,6 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
if (dimm_mask < 0)
goto hw_spd_err;
order_dimms(ctrl);
- if( !is_cpu_pre_e0() ) {
- print_debug("E step CPU\r\n");
- e_step_cpu(ctrl);
- }
return;
hw_spd_err:
/* Unrecoverable error reading SPD data */
@@ -2288,6 +2280,22 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
} while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
}
+ // init e0 mem hole here
+#if K8_E0_MEM_HOLE_SIZEK != 0
+ if (!is_cpu_pre_e0()) {
+ uint32_t base, limit;
+ unsigned base_k, limit_k;
+ base = pci_read_config32(ctrl->f1, 0x40 + (i << 3));
+ limit = pci_read_config32(ctrl->f1, 0x44 + (i << 3));
+ base_k = (base & 0xffff0000) >> 2;
+ limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
+ if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (limit_k > K8_E0_MEM_HOLE_BASEK)) {
+ set_e0_mem_hole(ctrl+i, base_k);
+ }
+ }
+
+#endif
+
print_debug(" done\r\n");
}
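The base_k/limit_k arithmetic above converts the per-node K8 DRAM base/limit registers (F1 0x40/0x44) into kilobytes: register bits [31:16] hold physical address bits [39:24], so masking with 0xffff0000 and shifting right by 2 yields an address in KB, with the limit rounded up because its field is inclusive. A hedged helper:

/* Sketch: convert a K8 F1 DRAM base/limit register value to kilobytes. */
static unsigned dram_reg_to_k(uint32_t reg, int is_limit)
{
	if (is_limit)
		reg += 0x00010000;       /* limit field is inclusive, round up */
	return (reg & 0xffff0000) >> 2;  /* addr[39:24] << 14 == address in KB */
}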
@@ -2300,7 +2308,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
5. for node interleaving we need to set the mem hole on every node (need to recalculate the hole offset in f0 for every node)
*/
-#if CONFIG_DCACHE_RAM == 0
+#if USE_DCACHE_RAM == 0
/* Make certain the first 1M of memory is initialized */
print_debug("Clearing initial memory region: ");
diff --git a/src/northbridge/amd/amdk8/setup_resource_map.c b/src/northbridge/amd/amdk8/setup_resource_map.c
index ebd1978a7c..27da719409 100644
--- a/src/northbridge/amd/amdk8/setup_resource_map.c
+++ b/src/northbridge/amd/amdk8/setup_resource_map.c
@@ -1,16 +1,53 @@
#define RES_DEBUG 0
+static void setup_resource_map_offset(const unsigned int *register_values, int max, unsigned offset_pci_dev, unsigned offset_io_base)
+{
+ int i;
+// print_debug("setting up resource map offset....");
+#if 0
+ print_debug("\r\n");
+#endif
+ for(i = 0; i < max; i += 3) {
+ device_t dev;
+ unsigned where;
+ unsigned long reg;
+#if 0
+ #if CONFIG_USE_INIT
+	printk_debug("%08x <- %08x\r\n", register_values[i] + offset_pci_dev, register_values[i+2]);
+ #else
+ print_debug_hex32(register_values[i] + offset_pci_dev);
+ print_debug(" <-");
+ print_debug_hex32(register_values[i+2]);
+ print_debug("\r\n");
+ #endif
+#endif
+ dev = (register_values[i] & ~0xff) + offset_pci_dev;
+ where = register_values[i] & 0xff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2] + offset_io_base;
+ pci_write_config32(dev, where, reg);
+#if 0
+ reg = pci_read_config32(register_values[i]);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2] & ~register_values[i+1];
+ pci_write_config32(register_values[i], reg);
+#endif
+ }
+// print_debug("done.\r\n");
+}
+
#define RES_PCI_IO 0x10
#define RES_PORT_IO_8 0x22
#define RES_PORT_IO_32 0x20
-#define RES_MEM_IO 0x30
+#define RES_MEM_IO 0x40
-static void setup_resource_map_x(const unsigned int *register_values, int max)
+static void setup_resource_map_x_offset(const unsigned int *register_values, int max, unsigned offset_pci_dev, unsigned offset_io_base)
{
int i;
#if RES_DEBUG
- print_debug("setting up resource map ex....");
+ print_debug("setting up resource map ex offset....");
#endif
@@ -21,17 +58,23 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
#if RES_DEBUG
#if CONFIG_USE_INIT
printk_debug("%04x: %02x %08x <- & %08x | %08x\r\n",
- i/4, register_values[i],register_values[i+1], register_values[i+2], register_values[i+3]);
+ i/4, register_values[i],
+ register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0),
+ register_values[i+2],
+ register_values[i+3] + ( ( (register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
+ );
#else
print_debug_hex16(i/4);
print_debug(": ");
print_debug_hex8(register_values[i]);
print_debug(" ");
- print_debug_hex32(register_values[i+1]);
+ print_debug_hex32(register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0) );
print_debug(" <- & ");
print_debug_hex32(register_values[i+2]);
print_debug(" | ");
- print_debug_hex32(register_values[i+3]);
+ print_debug_hex32(register_values[i+3] +
+ (((register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
+ );
print_debug("\r\n");
#endif
#endif
@@ -41,7 +84,7 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
device_t dev;
unsigned where;
unsigned long reg;
- dev = register_values[i+1] & ~0xff;
+ dev = (register_values[i+1] & ~0xff) + offset_pci_dev;
where = register_values[i+1] & 0xff;
reg = pci_read_config32(dev, where);
reg &= register_values[i+2];
@@ -53,7 +96,7 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
{
unsigned where;
unsigned reg;
- where = register_values[i+1];
+ where = register_values[i+1] + offset_io_base;
reg = inb(where);
reg &= register_values[i+2];
reg |= register_values[i+3];
@@ -64,7 +107,7 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
{
unsigned where;
unsigned long reg;
- where = register_values[i+1];
+ where = register_values[i+1] + offset_io_base;
reg = inl(where);
reg &= register_values[i+2];
reg |= register_values[i+3];
@@ -94,7 +137,95 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
print_debug("done.\r\n");
#endif
}
+static void setup_resource_map_x(const unsigned int *register_values, int max)
+{
+ int i;
+#if RES_DEBUG
+	print_debug("setting up resource map ex....");
+
+#endif
+
+#if RES_DEBUG
+ print_debug("\r\n");
+#endif
+ for(i = 0; i < max; i += 4) {
+#if RES_DEBUG
+ #if CONFIG_USE_INIT
+ printk_debug("%04x: %02x %08x <- & %08x | %08x\r\n",
+ i/4, register_values[i],register_values[i+1], register_values[i+2], register_values[i+3]);
+ #else
+ print_debug_hex16(i/4);
+ print_debug(": ");
+ print_debug_hex8(register_values[i]);
+ print_debug(" ");
+ print_debug_hex32(register_values[i+1]);
+ print_debug(" <- & ");
+ print_debug_hex32(register_values[i+2]);
+ print_debug(" | ");
+ print_debug_hex32(register_values[i+3]);
+ print_debug("\r\n");
+ #endif
+#endif
+ switch (register_values[i]) {
+ case RES_PCI_IO: //PCI
+ {
+ device_t dev;
+ unsigned where;
+ unsigned long reg;
+ dev = register_values[i+1] & ~0xff;
+ where = register_values[i+1] & 0xff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ pci_write_config32(dev, where, reg);
+ }
+ break;
+ case RES_PORT_IO_8: // io 8
+ {
+ unsigned where;
+ unsigned reg;
+ where = register_values[i+1];
+ reg = inb(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outb(reg, where);
+ }
+ break;
+ case RES_PORT_IO_32: //io32
+ {
+ unsigned where;
+ unsigned long reg;
+ where = register_values[i+1];
+ reg = inl(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outl(reg, where);
+ }
+ break;
+#if 0
+ case RES_MEM_IO: //mem
+ {
+ unsigned where;
+ unsigned long reg;
+ where = register_values[i+1];
+ reg = read32(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ write32( where, reg);
+ }
+ break;
+#endif
+
+ } // switch
+
+
+ }
+
+#if RES_DEBUG
+ print_debug("done.\r\n");
+#endif
+}
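setup_resource_map_x() walks a flat table of four-word entries: a type tag (RES_PCI_IO, RES_PORT_IO_8, RES_PORT_IO_32 or RES_MEM_IO), a target (a PCI_DEV()-plus-register value or an I/O port), an AND mask and an OR value, applied read-modify-write. A hypothetical two-entry table as a usage sketch; the register, port and mask values below are made up for illustration:

/* Sketch: example input for setup_resource_map_x(); all values illustrative. */
static const unsigned int example_res_map[] = {
	/* type          target (dev+reg or I/O port)   AND mask     OR value   */
	RES_PCI_IO,      PCI_DEV(0, 0x18, 1) + 0xe0,    0xffffff00,  0x00000003,
	RES_PORT_IO_8,   0x0cd6,                        0x000000f0,  0x00000001,
};
/* usage: setup_resource_map_x(example_res_map, 8);   max = number of words (2 entries * 4) */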
static void setup_iob_resource_map(const unsigned int *register_values, int max)
{