author		Marc Jones <marc.jones@amd.com>	2007-12-19 01:32:08 +0000
committer	Marc Jones <marc.jones@amd.com>	2007-12-19 01:32:08 +0000
commit		8ae8c8822068ef1722c08073ffa4ecc25633cbee (patch)
tree		8c7bbf2f7b791081e486439a9b7ffb2fd6e649ac /src/northbridge
parent		2006b38fed2f5f3680de1736f7fc878823f2f93b (diff)
Initial AMD Barcelona support for rev Bx.
These are the core files for HyperTransport, DDR2 Memory, and multi-core initialization.

Signed-off-by: Marc Jones <marc.jones@amd.com>
Reviewed-by: Jordan Crouse <jordan.crouse@amd.com>
Acked-by: Myles Watson <myles@pel.cs.byu.edu>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3014 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/northbridge')
-rw-r--r--	src/northbridge/amd/amdfam10/Config.lb	79
-rw-r--r--	src/northbridge/amd/amdfam10/amdfam10.h	1164
-rw-r--r--	src/northbridge/amd/amdfam10/amdfam10_acpi.c	382
-rw-r--r--	src/northbridge/amd/amdfam10/amdfam10_conf.c	874
-rw-r--r--	src/northbridge/amd/amdfam10/amdfam10_nums.h	41
-rw-r--r--	src/northbridge/amd/amdfam10/amdfam10_pci.c	73
-rw-r--r--	src/northbridge/amd/amdfam10/chip.h	24
-rw-r--r--	src/northbridge/amd/amdfam10/debug.c	331
-rw-r--r--	src/northbridge/amd/amdfam10/early_ht.c	175
-rw-r--r--	src/northbridge/amd/amdfam10/get_pci1234.c	118
-rw-r--r--	src/northbridge/amd/amdfam10/misc_control.c	155
-rw-r--r--	src/northbridge/amd/amdfam10/northbridge.c	1477
-rw-r--r--	src/northbridge/amd/amdfam10/northbridge.h	25
-rw-r--r--	src/northbridge/amd/amdfam10/raminit.h	73
-rw-r--r--	src/northbridge/amd/amdfam10/raminit_amdmct.c	155
-rw-r--r--	src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c	81
-rw-r--r--	src/northbridge/amd/amdfam10/reset_test.c	169
-rw-r--r--	src/northbridge/amd/amdfam10/resourcemap.c	287
-rw-r--r--	src/northbridge/amd/amdfam10/root_complex/Config.lb	1
-rw-r--r--	src/northbridge/amd/amdfam10/root_complex/chip.h	24
-rw-r--r--	src/northbridge/amd/amdfam10/setup_resource_map.c	231
-rw-r--r--	src/northbridge/amd/amdfam10/spd_ddr2.h	88
-rw-r--r--	src/northbridge/amd/amdfam10/ssdt.dsl	346
-rw-r--r--	src/northbridge/amd/amdfam10/sspr1.dsl	39
-rw-r--r--	src/northbridge/amd/amdfam10/sspr2.dsl	40
-rw-r--r--	src/northbridge/amd/amdfam10/sspr3.dsl	41
-rw-r--r--	src/northbridge/amd/amdfam10/sspr4.dsl	42
-rw-r--r--	src/northbridge/amd/amdfam10/sspr5.dsl	43
-rw-r--r--	src/northbridge/amd/amdht/AsPsDefs.h	252
-rw-r--r--	src/northbridge/amd/amdht/AsPsNb.c	145
-rw-r--r--	src/northbridge/amd/amdht/AsPsNb.h	26
-rw-r--r--	src/northbridge/amd/amdht/comlib.c	290
-rw-r--r--	src/northbridge/amd/amdht/comlib.h	59
-rw-r--r--	src/northbridge/amd/amdht/h3ffeat.h	177
-rw-r--r--	src/northbridge/amd/amdht/h3finit.c	1678
-rw-r--r--	src/northbridge/amd/amdht/h3finit.h	613
-rw-r--r--	src/northbridge/amd/amdht/h3gtopo.h	358
-rw-r--r--	src/northbridge/amd/amdht/h3ncmn.c	2214
-rw-r--r--	src/northbridge/amd/amdht/h3ncmn.h	132
-rw-r--r--	src/northbridge/amd/amdht/ht_wrapper.c	160
-rw-r--r--	src/northbridge/amd/amdht/porting.h	88
-rw-r--r--	src/northbridge/amd/amdmct/amddefs.h	69
-rw-r--r--	src/northbridge/amd/amdmct/mct/mct.h	552
-rw-r--r--	src/northbridge/amd/amdmct/mct/mct_d.c	3862
-rw-r--r--	src/northbridge/amd/amdmct/mct/mct_d.h	737
-rw-r--r--	src/northbridge/amd/amdmct/mct/mct_d_gcc.h	388
-rw-r--r--	src/northbridge/amd/amdmct/mct/mct_fd.c	25
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctardk3.c	206
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctardk4.c	172
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctchi_d.c	130
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctcsi_d.c	147
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctdqs_d.c	1216
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctecc_d.c	296
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctgr.c	88
-rw-r--r--	src/northbridge/amd/amdmct/mct/mcthdi.c	33
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctmtr_d.c	213
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctndi_d.c	237
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctpro_d.c	406
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctsrc.c	1121
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctsrc1p.c	96
-rw-r--r--	src/northbridge/amd/amdmct/mct/mctsrc2p.c	139
-rw-r--r--	src/northbridge/amd/amdmct/mct/mcttmrl.c	413
-rw-r--r--	src/northbridge/amd/amdmct/wrappers/mcti.h	59
-rw-r--r--	src/northbridge/amd/amdmct/wrappers/mcti_d.c	338
64 files changed, 23713 insertions, 0 deletions
diff --git a/src/northbridge/amd/amdfam10/Config.lb b/src/northbridge/amd/amdfam10/Config.lb
new file mode 100644
index 0000000000..96619b23c6
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/Config.lb
@@ -0,0 +1,79 @@
+#
+# This file is part of the LinuxBIOS project.
+#
+# Copyright (C) 2007 Advanced Micro Devices, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+uses CONFIG_CHIP_NAME
+uses AGP_APERTURE_SIZE
+uses HAVE_ACPI_TABLES
+
+default AGP_APERTURE_SIZE=0x4000000
+
+if CONFIG_CHIP_NAME
+ config chip.h
+end
+
+object northbridge.o
+driver misc_control.o
+
+if HAVE_ACPI_TABLES
+ object amdfam10_acpi.o
+ makerule ssdt.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/ssdt.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/ssdt.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_ssdt/g' ssdt.hex"
+ action "mv ssdt.hex ssdt.c"
+ end
+ object ./ssdt.o
+ makerule sspr1.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/sspr1.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/sspr1.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_sspr1/g' sspr1.hex"
+ action "mv sspr1.hex sspr1.c"
+ end
+ object ./sspr1.o
+ makerule sspr2.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/sspr2.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/sspr2.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_sspr2/g' sspr2.hex"
+ action "mv sspr2.hex sspr2.c"
+ end
+ object ./sspr2.o
+ makerule sspr3.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/sspr3.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/sspr3.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_sspr3/g' sspr3.hex"
+ action "mv sspr3.hex sspr3.c"
+ end
+ object ./sspr3.o
+ makerule sspr4.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/sspr4.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/sspr4.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_sspr4/g' sspr4.hex"
+ action "mv sspr4.hex sspr4.c"
+ end
+ object ./sspr4.o
+ makerule sspr5.c
+ depends "$(TOP)/src/northbridge/amd/amdfam10/sspr5.dsl"
+ action "iasl -tc $(TOP)/src/northbridge/amd/amdfam10/sspr5.dsl"
+ action "perl -pi -e 's/AmlCode/AmlCode_sspr5/g' sspr5.hex"
+ action "mv sspr5.hex sspr5.c"
+ end
+ object ./sspr5.o
+end
+
+object get_pci1234.o
diff --git a/src/northbridge/amd/amdfam10/amdfam10.h b/src/northbridge/amd/amdfam10/amdfam10.h
new file mode 100644
index 0000000000..369f8dde20
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/amdfam10.h
@@ -0,0 +1,1164 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AMDFAM10_H
+
+#define AMDFAM10_H
+/* Definitions of various FAM10 registers */
+/* Function 0 */
+#define HT_TRANSACTION_CONTROL 0x68
+#define HTTC_DIS_RD_B_P (1 << 0)
+#define HTTC_DIS_RD_DW_P (1 << 1)
+#define HTTC_DIS_WR_B_P (1 << 2)
+#define HTTC_DIS_WR_DW_P (1 << 3)
+#define HTTC_DIS_MTS (1 << 4)
+#define HTTC_CPU1_EN (1 << 5)
+#define HTTC_CPU_REQ_PASS_PW (1 << 6)
+#define HTTC_CPU_RD_RSP_PASS_PW (1 << 7)
+#define HTTC_DIS_P_MEM_C (1 << 8)
+#define HTTC_DIS_RMT_MEM_C (1 << 9)
+#define HTTC_DIS_FILL_P (1 << 10)
+#define HTTC_RSP_PASS_PW (1 << 11)
+#define HTTC_BUF_REL_PRI_SHIFT 13
+#define HTTC_BUF_REL_PRI_MASK 3
+#define HTTC_BUF_REL_PRI_64 0
+#define HTTC_BUF_REL_PRI_16 1
+#define HTTC_BUF_REL_PRI_8 2
+#define HTTC_BUF_REL_PRI_2 3
+#define HTTC_LIMIT_CLDT_CFG (1 << 15)
+#define HTTC_LINT_EN (1 << 16)
+#define HTTC_APIC_EXT_BRD_CST (1 << 17)
+#define HTTC_APIC_EXT_ID (1 << 18)
+#define HTTC_APIC_EXT_SPUR (1 << 19)
+#define HTTC_SEQ_ID_SRC_NODE_EN (1 << 20)
+#define HTTC_DS_NP_REQ_LIMIT_SHIFT 21
+#define HTTC_DS_NP_REQ_LIMIT_MASK 3
+#define HTTC_DS_NP_REQ_LIMIT_NONE 0
+#define HTTC_DS_NP_REQ_LIMIT_1 1
+#define HTTC_DS_NP_REQ_LIMIT_4 2
+#define HTTC_DS_NP_REQ_LIMIT_8 3
+
+
+/* Function 1 */
+#define PCI_IO_BASE0 0xc0
+#define PCI_IO_BASE1 0xc8
+#define PCI_IO_BASE2 0xd0
+#define PCI_IO_BASE3 0xd8
+#define PCI_IO_BASE_VGA_EN (1 << 4)
+#define PCI_IO_BASE_NO_ISA (1 << 5)
+
+/* Function 2 */
+// 0x1xx is for DCT1
+#define DRAM_CSBASE 0x40
+#define DRAM_CSMASK 0x60
+#define DRAM_BANK_ADDR_MAP 0x80
+
+#define DRAM_CTRL 0x78
+#define DC_RdPtrInit_SHIFT 0
+#define DC_RdPtrInit_MASK	0xf
+#define DC_Twrrd3_2_SHIFT 8 /*DDR3 */
+#define DC_Twrrd3_2_MASK 3
+#define DC_Twrwr3_2_SHIFT 10 /*DDR3 */
+#define DC_Twrwr3_2_MASK 3
+#define DC_Trdrd3_2_SHIFT 12 /*DDR3 */
+#define DC_Trdrd3_2_MASK 3
+#define DC_AltVidC3MemClkTriEn (1<<16)
+#define DC_DqsRcvEnTrain (1<<18)
+#define DC_MaxRdLatency_SHIFT 22
+#define DC_MaxRdLatency_MASK 0x3ff
+
+#define DRAM_INIT 0x7c
+#define DI_MrsAddress_SHIFT 0
+#define DI_MrsAddress_MASK 0xffff
+#define DI_MrsBank_SHIFT 16
+#define DI_MrsBank_MASK 7
+#define DI_MrsChipSel_SHIFT 20
+#define DI_MrsChipSel_MASK 7
+#define DI_SendRchgAll (1<<24)
+#define DI_SendAutoRefresh (1<<25)
+#define DI_SendMrsCmd (1<<26)
+#define DI_DeassertMemRstX (1<<27)
+#define DI_AssertCke (1<<28)
+#define DI_SendZQCmd (1<<29) /*DDR3 */
+#define DI_EnMrsCmd (1<<30)
+#define DI_EnDramInit (1<<31)
+
+#define DRAM_MRS 0x84
+#define DM_BurstCtrl_SHIFT 0
+#define DM_BurstCtrl_MASK 3
+#define DM_DrvImpCtrl_SHIFT 2 /* DDR3 */
+#define DM_DrvImpCtrl_MASK 3
+#define DM_Twr_SHIFT 4 /* DDR3 */
+#define DM_Twr_MASK 7
+#define DM_Twr_BASE 4
+#define DM_Twr_MIN 5
+#define DM_Twr_MAX 12
+#define DM_DramTerm_SHIFT 7 /*DDR3 */
+#define DM_DramTerm_MASK 7
+#define DM_DramTermDyn_SHIFT 10 /* DDR3 */
+#define DM_DramTermDyn_MASK 3
+#define DM_Ooff (1<<13)
+#define DM_ASR (1<<18)
+#define DM_SRT (1<<19)
+#define DM_Tcwl_SHIFT 20
+#define DM_Tcwl_MASK 7
+#define DM_PchgPDModeSel (1<<23) /* DDR3 */
+#define DM_MPrLoc_SHIFT 24 /* DDR3 */
+#define DM_MPrLoc_MASK 3
+#define DM_MprEn (1<<26) /* DDR3 */
+
+#define DRAM_TIMING_LOW 0x88
+#define DTL_TCL_SHIFT 0
+#define DTL_TCL_MASK 0xf
+#define DTL_TCL_BASE 1 /* DDR3 =4 */
+#define DTL_TCL_MIN 3 /* DDR3 =4 */
+#define DTL_TCL_MAX 6 /* DDR3 =12 */
+#define DTL_TRCD_SHIFT 4
+#define DTL_TRCD_MASK 3 /* DDR3 =7 */
+#define DTL_TRCD_BASE 3 /* DDR3 =5 */
+#define DTL_TRCD_MIN 3 /* DDR3 =5 */
+#define DTL_TRCD_MAX 6 /* DDR3 =12 */
+#define DTL_TRP_SHIFT 8 /* DDR3 =7 */
+#define DTL_TRP_MASK 3 /* DDR3 =7 */
+#define DTL_TRP_BASE 3 /* DDR3 =5 */
+#define DTL_TRP_MIN 3 /* DDR3 =5 */
+#define DTL_TRP_MAX 6 /* DDR3 =12 */
+#define DTL_TRTP_SHIFT 11 /*DDR3 =10 */
+#define DTL_TRTP_MASK 1 /*DDR3 =3 */
+#define DTL_TRTP_BASE 2 /* DDR3 =4 */
+#define DTL_TRTP_MIN 2 /* 4 for 64 bytes*/ /* DDR3 =4 for 32bytes or 64bytes */
+#define DTL_TRTP_MAX 3 /* 5 for 64 bytes */ /* DDR3 =7 for 32Bytes or 64bytes */
+#define DTL_TRAS_SHIFT 12
+#define DTL_TRAS_MASK 0xf
+#define DTL_TRAS_BASE 3 /* DDR3 =15 */
+#define DTL_TRAS_MIN 5 /* DDR3 =15 */
+#define DTL_TRAS_MAX 18 /*DDR3 =30 */
+#define DTL_TRC_SHIFT 16
+#define DTL_TRC_MASK 0xf /* DDR3 =0x1f */
+#define DTL_TRC_BASE 11
+#define DTL_TRC_MIN 11
+#define DTL_TRC_MAX 26 /* DDR3 =43 */
+#define DTL_TWR_SHIFT 20 /* only for DDR2, DDR3's is on DC */
+#define DTL_TWR_MASK 3
+#define DTL_TWR_BASE 3
+#define DTL_TWR_MIN 3
+#define DTL_TWR_MAX 6
+#define DTL_TRRD_SHIFT 22
+#define DTL_TRRD_MASK 3
+#define DTL_TRRD_BASE 2 /* DDR3 =4 */
+#define DTL_TRRD_MIN 2 /* DDR3 =4 */
+#define DTL_TRRD_MAX 5 /* DDR3 =7 */
+#define DTL_MemClkDis_SHIFT 24 /* Channel A */
+#define DTL_MemClkDis3 (1 << 26)
+#define DTL_MemClkDis2 (1 << 27)
+#define DTL_MemClkDis1 (1 << 28)
+#define DTL_MemClkDis0 (1 << 29)
+/* DTL_MemClkDis for m2 and s1g1 is different */
+
+#define DRAM_TIMING_HIGH 0x8c
+#define DTH_TRWTWB_SHIFT 0
+#define DTH_TRWTWB_MASK 3
+#define DTH_TRWTWB_BASE 3 /* DDR3 =4 */
+#define DTH_TRWTWB_MIN 3 /* DDR3 =5 */
+#define DTH_TRWTWB_MAX 10 /* DDR3 =11 */
+#define DTH_TRWTTO_SHIFT 4
+#define DTH_TRWTTO_MASK 7
+#define DTH_TRWTTO_BASE 2 /* DDR3 =3 */
+#define DTH_TRWTTO_MIN 2 /* DDR3 =3 */
+#define DTH_TRWTTO_MAX 9 /* DDR3 =10 */
+#define DTH_TWTR_SHIFT 8
+#define DTH_TWTR_MASK 3
+#define DTH_TWTR_BASE 0 /* DDR3 =4 */
+#define DTH_TWTR_MIN 1 /* DDR3 =4 */
+#define DTH_TWTR_MAX 3 /* DDR3 =7 */
+#define DTH_TWRRD_SHIFT 10
+#define DTH_TWRRD_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
+#define DTH_TWRRD_BASE 0 /* DDR3 =0 */
+#define DTH_TWRRD_MIN 0 /* DDR3 =2 */
+#define DTH_TWRRD_MAX 3 /* DDR3 =12 */
+#define DTH_TWRWR_SHIFT 12
+#define DTH_TWRWR_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
+#define DTH_TWRWR_BASE 1
+#define DTH_TWRWR_MIN 1 /* DDR3 =3 */
+#define DTH_TWRWR_MAX 3 /* DDR3 =12 */
+#define DTH_TRDRD_SHIFT 14
+#define DTH_TRDRD_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
+#define DTH_TRDRD_BASE 2
+#define DTH_TRDRD_MIN 2
+#define DTH_TRDRD_MAX 5 /* DDR3 =10 */
+#define DTH_TREF_SHIFT 16
+#define DTH_TREF_MASK 3
+#define DTH_TREF_7_8_US 2
+#define DTH_TREF_3_9_US 3
+#define DTH_DisAutoRefresh (1<<18)
+#define DTH_TRFC0_SHIFT 20 /* for Logical DIMM0 */
+#define DTH_TRFC_MASK 7
+#define DTH_TRFC_75_256M 0
+#define DTH_TRFC_105_512M 1
+#define DTH_TRFC_127_5_1G 2
+#define DTH_TRFC_195_2G 3
+#define DTH_TRFC_327_5_4G 4
+#if 0
+//DDR3
+#define DTH_TRFC_90_512M 1
+#define DTH_TRFC_110_5_1G 2
+#define DTH_TRFC_160_2G 3
+#define DTH_TRFC_300_4G 4
+#define DTH_TRFC_UNDEFINED_8G 5
+#endif
+#define DTH_TRFC1_SHIFT 23 /*for Logical DIMM1 */
+#define DTH_TRFC2_SHIFT 26 /*for Logical DIMM2 */
+#define DTH_TRFC3_SHIFT 29 /*for Logical DIMM3 */
+
+#define DRAM_CONFIG_LOW 0x90
+#define DCL_InitDram (1<<0)
+#define DCL_ExitSelfRef (1<<1)
+#define DCL_PllLockTime_SHIFT 2
+#define DCL_PllLockTime_MASK 3
+#define DCL_PllLockTime_15US 0
+#define DCL_PllLockTime_6US 1
+#define DCL_DramTerm_SHIFT 4
+#define DCL_DramTerm_MASK 3
+#define DCL_DramTerm_No 0
+#define DCL_DramTerm_75_OH 1
+#define DCL_DramTerm_150_OH 2
+#define DCL_DramTerm_50_OH 3
+#define DCL_DisDqsBar (1<<6) /* only for DDR2 */
+#define DCL_DramDrvWeak (1<<7) /* only for DDR2 */
+#define DCL_ParEn (1<<8)
+#define DCL_SelfRefRateEn (1<<9) /* only for DDR2 */
+#define DCL_BurstLength32 (1<<10) /* only for DDR3 */
+#define DCL_Width128 (1<<11)
+#define DCL_X4Dimm_SHIFT 12
+#define DCL_X4Dimm_MASK 0xf
+#define DCL_UnBuffDimm (1<<16)
+#define DCL_EnPhyDqsRcvEnTr (1<<18)
+#define DCL_DimmEccEn (1<<19)
+#define DCL_DynPageCloseEn (1<<20)
+#define DCL_IdleCycInit_SHIFT 21
+#define DCL_IdleCycInit_MASK 3
+#define DCL_IdleCycInit_16CLK 0
+#define DCL_IdleCycInit_32CLK 1
+#define DCL_IdleCycInit_64CLK 2
+#define DCL_IdleCycInit_96CLK 3
+#define DCL_ForceAutoPchg (1<<23)
+
+#define DRAM_CONFIG_HIGH 0x94
+#define DCH_MemClkFreq_SHIFT 0
+#define DCH_MemClkFreq_MASK 7
+#define DCH_MemClkFreq_200MHz 0 /* DDR2 */
+#define DCH_MemClkFreq_266MHz 1 /* DDR2 */
+#define DCH_MemClkFreq_333MHz 2 /* DDR2 */
+#define DCH_MemClkFreq_400MHz 3 /* DDR2 and DDR 3*/
+#define DCH_MemClkFreq_533MHz 4 /* DDR 3 */
+#define DCH_MemClkFreq_667MHz 5 /* DDR 3 */
+#define DCH_MemClkFreq_800MHz 6 /* DDR 3 */
+#define DCH_MemClkFreqVal (1<<3)
+#define DCH_Ddr3Mode (1<<8)
+#define DCH_LegacyBiosMode (1<<9)
+#define DCH_ZqcsInterval_SHIFT 10
+#define DCH_ZqcsInterval_MASK 3
+#define DCH_ZqcsInterval_DIS 0
+#define DCH_ZqcsInterval_64MS 1
+#define DCH_ZqcsInterval_128MS 2
+#define DCH_ZqcsInterval_256MS 3
+#define DCH_RDqsEn (1<<12) /* only for DDR2 */
+#define DCH_DisSimulRdWr (1<<13)
+#define DCH_DisDramInterface (1<<14)
+#define DCH_PowerDownEn (1<<15)
+#define DCH_PowerDownMode_SHIFT 16
+#define DCH_PowerDownMode_MASK 1
+#define DCH_PowerDownMode_Channel_CKE 0
+#define DCH_PowerDownMode_ChipSelect_CKE 1
+#define DCH_FourRankSODimm (1<<17)
+#define DCH_FourRankRDimm (1<<18)
+#define DCH_SlowAccessMode (1<<20)
+#define DCH_BankSwizzleMode (1<<22)
+#define DCH_DcqBypassMax_SHIFT 24
+#define DCH_DcqBypassMax_MASK 0xf
+#define DCH_DcqBypassMax_BASE 0
+#define DCH_DcqBypassMax_MIN 0
+#define DCH_DcqBypassMax_MAX 15
+#define DCH_FourActWindow_SHIFT 28
+#define DCH_FourActWindow_MASK 0xf
+#define DCH_FourActWindow_BASE 7 /* DDR3 15 */
+#define DCH_FourActWindow_MIN 8 /* DDR3 16 */
+#define DCH_FourActWindow_MAX 20 /* DDR3 30 */
+
+
+// for 0x98 index and 0x9c data for DCT0
+// for 0x198 index and 0x19c data for DCT1
+// even in ganged mode, 0x198/0x19c will be used for channel B
+
+#define DRAM_CTRL_ADDI_DATA_OFFSET 0x98
+#define DCAO_DctOffset_SHIFT 0
+#define DCAO_DctOffset_MASK 0x3fffffff
+#define DCAO_DctAccessWrite (1<<30)
+#define DCAO_DctAccessDone (1<<31)
+
+#define DRAM_CTRL_ADDI_DATA_PORT 0x9c
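The two registers above form an index/data pair into the DCT additional-data space. A minimal read sketch (editor's illustration, not part of this commit), assuming a hypothetical dct_addi_read() helper and the node's function-2 device in dev:

	static u32 dct_addi_read(device_t dev, u32 index)
	{
		u32 dword;
		/* select the offset with DctAccessWrite clear, then poll until the access completes */
		pci_write_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET, index & DCAO_DctOffset_MASK);
		do {
			dword = pci_read_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET);
		} while (!(dword & DCAO_DctAccessDone));
		/* the selected register's contents appear in the data port */
		return pci_read_config32(dev, DRAM_CTRL_ADDI_DATA_PORT);
	}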
+
+#define DRAM_OUTPUT_DRV_COMP_CTRL 0x00
+#define DODCC_CkeDrvStren_SHIFT 0
+#define DODCC_CkeDrvStren_MASK 3
+#define DODCC_CkeDrvStren_1_0X 0
+#define DODCC_CkeDrvStren_1_25X 1
+#define DODCC_CkeDrvStren_1_5X 2
+#define DODCC_CkeDrvStren_2_0X 3
+#define DODCC_CsOdtDrvStren_SHIFT 4
+#define DODCC_CsOdtDrvStren_MASK 3
+#define DODCC_CsOdtDrvStren_1_0X 0
+#define DODCC_CsOdtDrvStren_1_25X 1
+#define DODCC_CsOdtDrvStren_1_5X 2
+#define DODCC_CsOdtDrvStren_2_0X 3
+#define DODCC_AddrCmdDrvStren_SHIFT 8
+#define DODCC_AddrCmdDrvStren_MASK 3
+#define DODCC_AddrCmdDrvStren_1_0X 0
+#define DODCC_AddrCmdDrvStren_1_25X 1
+#define DODCC_AddrCmdDrvStren_1_5X 2
+#define DODCC_AddrCmdDrvStren_2_0X 3
+#define DODCC_ClkDrvStren_SHIFT 12
+#define DODCC_ClkDrvStren_MASK 3
+#define DODCC_ClkDrvStren_0_75X 0
+#define DODCC_ClkDrvStren_1_0X 1
+#define DODCC_ClkDrvStren_1_25X 2
+#define DODCC_ClkDrvStren_1_5X 3
+#define DODCC_DataDrvStren_SHIFT 16
+#define DODCC_DataDrvStren_MASK 3
+#define DODCC_DataDrvStren_0_75X 0
+#define DODCC_DataDrvStren_1_0X 1
+#define DODCC_DataDrvStren_1_25X 2
+#define DODCC_DataDrvStren_1_5X 3
+#define DODCC_DqsDrvStren_SHIFT 20
+#define DODCC_DqsDrvStren_MASK 3
+#define DODCC_DqsDrvStren_0_75X 0
+#define DODCC_DqsDrvStren_1_0X 1
+#define DODCC_DqsDrvStren_1_25X 2
+#define DODCC_DqsDrvStren_1_5X 3
+#define DODCC_ProcOdt_SHIFT 28
+#define DODCC_ProcOdt_MASK 3
+#define DODCC_ProcOdt_300_OHMS 0
+#define DODCC_ProcOdt_150_OHMS 1
+#define DODCC_ProcOdt_75_OHMS 2
+#if 0
+//DDR3
+#define DODCC_ProcOdt_240_OHMS 0
+#define DODCC_ProcOdt_120_OHMS 1
+#define DODCC_ProcOdt_60_OHMS 2
+#endif
+
+/*
+ for DDR2 400, 533, 667, F2x[1,0]9C_x[02:01], [03], [06:05], [07] control the timing of all DIMMs
+ for DDR2 800, DDR3 800, 1067, 1333, 1600, F2x[1,0]9C_x[02:01], [03], [06:05], [07] control the timing of DIMM0
+	F2x[1,0]9C_x[102:101], [103], [106:105], [107] control the timing of DIMM1
+ So will Socket F with four logical DIMMs only support DDR2 800?
+*/
+/* the indexes at +0x100 (for DIMM1) correspond to
+the indexes 0x01, 0x02, 0x03, 0x05, 0x06, 0x07 below
+*/
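A minimal illustration (editor's sketch, not from this commit) of the +0x100 per-DIMM offset described above; "dimm" is an assumed variable selecting logical DIMM0 or DIMM1:

	/* F2x[1,0]9C_x01 programs DIMM0 write-data timing; F2x[1,0]9C_x101 the same field for DIMM1 */
	u32 index = 0x01 + (dimm ? 0x100 : 0);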
+//02/15/2006 18:37
+#define DRAM_WRITE_DATA_TIMING_CTRL_LOW 0x01
+#define DWDTC_WrDatFineDlyByte0_SHIFT 0
+#define DWDTC_WrDatFineDlyByte_MASK 0x1f
+#define DWDTC_WrDatFineDlyByte_BASE 0
+#define DWDTC_WrDatFineDlyByte_MIN 0
+#define DWDTC_WrDatFineDlyByte_MAX 31 // 1/64 MEMCLK
+#define DWDTC_WrDatGrossDlyByte0_SHIFT 5
+#define DWDTC_WrDatGrossDlyByte_MASK 0x3
+#define DWDTC_WrDatGrossDlyByte_NO_DELAY 0
+#define DWDTC_WrDatGrossDlyByte_0_5_ 1
+#define DWDTC_WrDatGrossDlyByte_1 2
+#define DWDTC_WrDatFineDlyByte1_SHIFT 8
+#define DWDTC_WrDatGrossDlyByte1_SHIFT 13
+#define DWDTC_WrDatFineDlyByte2_SHIFT 16
+#define DWDTC_WrDatGrossDlyByte2_SHIFT 21
+#define DWDTC_WrDatFineDlyByte3_SHIFT 24
+#define DWDTC_WrDatGrossDlyByte3_SHIFT 29
+
+#define DRAM_WRITE_DATA_TIMING_CTRL_HIGH 0x02
+#define DWDTC_WrDatFineDlyByte4_SHIFT 0
+#define DWDTC_WrDatGrossDlyByte4_SHIFT 5
+#define DWDTC_WrDatFineDlyByte5_SHIFT 8
+#define DWDTC_WrDatGrossDlyByte5_SHIFT 13
+#define DWDTC_WrDatFineDlyByte6_SHIFT 16
+#define DWDTC_WrDatGrossDlyByte6_SHIFT 21
+#define DWDTC_WrDatFineDlyByte7_SHIFT 24
+#define DWDTC_WrDatGrossDlyByte7_SHIFT 29
+
+#define DRAM_WRITE_ECC_TIMING_CTRL 0x03
+#define DWETC_WrChkFinDly_SHIFT 0
+#define DWETC_WrChkGrossDly_SHIFT 5
+
+#define DRAM_ADDR_CMD_TIMING_CTRL 0x04
+#define DACTC_CkeFineDelay_SHIFT 0
+#define DACTC_CkeFineDelay_MASK 0x1f
+#define DACTC_CkeFineDelay_BASE 0
+#define DACTC_CkeFineDelay_MIN 0
+#define DACTC_CkeFineDelay_MAX 31
+#define DACTC_CkeSetup (1<<5)
+#define DACTC_CsOdtFineDelay_SHIFT 8
+#define DACTC_CsOdtFineDelay_MASK 0x1f
+#define DACTC_CsOdtFineDelay_BASE 0
+#define DACTC_CsOdtFineDelay_MIN 0
+#define DACTC_CsOdtFineDelay_MAX 31
+#define DACTC_CsOdtSetup (1<<13)
+#define DACTC_AddrCmdFineDelay_SHIFT 16
+#define DACTC_AddrCmdFineDelay_MASK 0x1f
+#define DACTC_AddrCmdFineDelay_BASE 0
+#define DACTC_AddrCmdFineDelay_MIN 0
+#define DACTC_AddrCmdFineDelay_MAX 31
+#define DACTC_AddrCmdSetup (1<<21)
+
+#define DRAM_READ_DQS_TIMING_CTRL_LOW 0x05
+#define DRDTC_RdDqsTimeByte0_SHIFT 0
+#define DRDTC_RdDqsTimeByte_MASK 0x3f
+#define DRDTC_RdDqsTimeByte_BASE 0
+#define DRDTC_RdDqsTimeByte_MIN 0
+#define DRDTC_RdDqsTimeByte_MAX 63 // 1/128 MEMCLK
+#define DRDTC_RdDqsTimeByte1_SHIFT 8
+#define DRDTC_RdDqsTimeByte2_SHIFT 16
+#define DRDTC_RdDqsTimeByte3_SHIFT 24
+
+#define DRAM_READ_DQS_TIMING_CTRL_HIGH 0x06
+#define DRDTC_RdDqsTimeByte4_SHIFT 0
+#define DRDTC_RdDqsTimeByte5_SHIFT 8
+#define DRDTC_RdDqsTimeByte6_SHIFT 16
+#define DRDTC_RdDqsTimeByte7_SHIFT 24
+
+#define DRAM_READ_DQS_ECC_TIMING_CTRL 0x07
+#define DRDETC_RdDqsTimeCheck_SHIFT 0
+
+#define DRAM_PHY_CTRL 0x08
+#define DPC_WrtLvTrEn (1<<0)
+#define DPC_WrtLvTrMode (1<<1)
+#define DPC_TrNibbleSel (1<<2)
+#define DPC_TrDimmSel_SHIFT 4
+#define DPC_TrDimmSel_MASK 3 /* 0-->dimm0, 1-->dimm1, 2--->dimm2, 3--->dimm3 */
+#define DPC_WrLvOdt_SHIFT 8
+#define DPC_WrLvOdt_MASK 0xf /* bit 0-->odt 0, ...*/
+#define DPC_WrLvODtEn (1<<12)
+#define DPC_DqsRcvTrEn (1<<13)
+#define DPC_DisAutoComp (1<<30)
+#define DPC_AsyncCompUpdate (1<<31)
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_0 0x10 //DIMM0 Channel A
+#define DDRETC_DqsRcvEnFineDelayByte0_SHIFT 0
+#define DDRETC_DqsRcvEnFineDelayByte0_MASK 0x1f
+#define DDRETC_DqsRcvEnGrossDelayByte0_SHIFT 5
+#define DDRETC_DqsRcvEnGrossDelayByte0_MASK 0x3
+#define DDRETC_DqsRcvEnFineDelayByte1_SHIFT 8
+#define DDRETC_DqsRcvEnGrossDelayByte1_SHIFT 13
+#define DDRETC_DqsRcvEnFineDelayByte2_SHIFT 16
+#define DDRETC_DqsRcvEnGrossDelayByte2_SHIFT 21
+#define DDRETC_DqsRcvEnFineDelayByte3_SHIFT 24
+#define DDRETC_DqsRcvEnGrossDelayByte3_SHIFT 29
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_1 0x11 //DIMM0 Channel A
+#define DDRETC_DqsRcvEnFineDelayByte4_SHIFT 0
+#define DDRETC_DqsRcvEnGrossDelayByte4_SHIFT 5
+#define DDRETC_DqsRcvEnFineDelayByte5_SHIFT 8
+#define DDRETC_DqsRcvEnGrossDelayByte5_SHIFT 13
+#define DDRETC_DqsRcvEnFineDelayByte6_SHIFT 16
+#define DDRETC_DqsRcvEnGrossDelayByte6_SHIFT 21
+#define DDRETC_DqsRcvEnFineDelayByte7_SHIFT 24
+#define DDRETC_DqsRcvEnGrossDelayByte7_SHIFT 29
+
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_0_0 0x12
+#define DDRETCE_WrChkFineDlyByte0_SHIFT 0
+#define DDRETCE_WrChkGrossDlyByte0_SHIFT 5
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_2 0x20 //DIMM0 channel B
+#define DDRETC_DqsRcvEnFineDelayByte8_SHIFT 0
+#define DDRETC_DqsRcvEnGrossDelayByte8_SHIFT 5
+#define DDRETC_DqsRcvEnFineDelayByte9_SHIFT 8
+#define DDRETC_DqsRcvEnGrossDelayByte9_SHIFT 13
+#define DDRETC_DqsRcvEnFineDelayByte10_SHIFT 16
+#define DDRETC_DqsRcvEnGrossDelayByte10_SHIFT 21
+#define DDRETC_DqsRcvEnFineDelayByte11_SHIFT 24
+#define DDRETC_DqsRcvEnGrossDelayByte11_SHIFT 29
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_3 0x21 // DIMM0 Channel B
+#define DDRETC_DqsRcvEnFineDelayByte12_SHIFT 0
+#define DDRETC_DqsRcvEnGrossDelayByte12_SHIFT 5
+#define DDRETC_DqsRcvEnFineDelayByte13_SHIFT 8
+#define DDRETC_DqsRcvEnGrossDelayByte13_SHIFT 13
+#define DDRETC_DqsRcvEnFineDelayByte14_SHIFT 16
+#define DDRETC_DqsRcvEnGrossDelayByte14_SHIFT 21
+#define DDRETC_DqsRcvEnFineDelayByte15_SHIFT 24
+#define DDRETC_DqsRcvEnGrossDelayByte15_SHIFT 29
+
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_0_1 0x22
+#define DDRETCE_WrChkFineDlyByte1_SHIFT 0
+#define DDRETCE_WrChkGrossDlyByte1_SHIFT 5
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_0 0x13 //DIMM1
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_1 0x14
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_1_0 0x15
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_2 0x23
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_3 0x24
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_1_1 0x25
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_0 0x16 // DIMM2
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_1 0x17
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_2_0 0x18
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_2 0x26
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_3 0x27
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_2_1 0x28
+
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_0 0x19 // DIMM3
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_1 0x1a
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_3_0 0x1b
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_2 0x29
+#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_3 0x2a
+#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_3_1 0x2b
+
+/* 04.06.2006 19:12 */
+
+#if 0
+//DDR3
+#define DRAM_DQS_WRITE_TIME_CTRL_0_0 0x30 //DIMM0 Channel A
+#define DDWTC_WrDqsFineDlyByte0_SHIFT 0
+#define DDWTC_WrDqsFineDlyByte0_MASK 0x1f
+#define DDWTC_WrDqsGrossDlyByte0_SHIFT 5
+#define DDWTC_WrDqsGrossDlyByte0_MASK 0x3
+#define DDWTC_WrDqsFineDlyByte1_SHIFT 8
+#define DDWTC_WrDqsGrossDlyByte1_SHIFT 13
+#define DDWTC_WrDqsFineDlyByte2_SHIFT 16
+#define DDWTC_WrDqsGrossDlyByte2_SHIFT 21
+#define DDWTC_WrDqsFineDlyByte3_SHIFT 24
+#define DDWTC_WrDqsGrossDlyByte3_SHIFT 29
+
+#define DRAM_DQS_WRITE_TIME_CTRL_0_1 0x31 //DIMM0 Channel A
+#define DDWTC_WrDqsFineDlyByte4_SHIFT 0
+#define DDWTC_WrDqsGrossDlyByte4_SHIFT 5
+#define DDWTC_WrDqsFineDlyByte5_SHIFT 8
+#define DDWTC_WrDqsGrossDlyByte5_SHIFT 13
+#define DDWTC_WrDqsFineDlyByte6_SHIFT 16
+#define DDWTC_WrDqsGrossDlyByte6_SHIFT 21
+#define DDWTC_WrDqsFineDlyByte7_SHIFT 24
+#define DDWTC_WrDqsGrossDlyByte7_SHIFT 29
+
+#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_0_0 0x32
+#define DDWTCE_WrDqsChkFineDlyByte0_SHIFT 0
+#define DDWTCE_WrDqsChkGrossDlyByte0_SHIFT 5
+
+#define DRAM_DQS_WRITE_TIME_CTRL_0_2 0x40 //DIMM0 Channel B
+#define DDWTC_WrDqsFineDlyByte8_SHIFT 0
+#define DDWTC_WrDqsGrossDlyByte8_SHIFT 5
+#define DDWTC_WrDqsFineDlyByte9_SHIFT 8
+#define DDWTC_WrDqsGrossDlyByte9_SHIFT 13
+#define DDWTC_WrDqsFineDlyByte10_SHIFT 16
+#define DDWTC_WrDqsGrossDlyByte10_SHIFT 21
+#define DDWTC_WrDqsFineDlyByte11_SHIFT 24
+#define DDWTC_WrDqsGrossDlyByte11_SHIFT 29
+
+#define DRAM_DQS_WRITE_TIME_CTRL_0_3 0x41 //DIMM0 Channel B
+#define DDWTC_WrDqsFineDlyByte12_SHIFT 0
+#define DDWTC_WrDqsGrossDlyByte12_SHIFT 5
+#define DDWTC_WrDqsFineDlyByte13_SHIFT 8
+#define DDWTC_WrDqsGrossDlyByte13_SHIFT 13
+#define DDWTC_WrDqsFineDlyByte14_SHIFT 16
+#define DDWTC_WrDqsGrossDlyByte14_SHIFT 21
+#define DDWTC_WrDqsFineDlyByte15_SHIFT 24
+#define DDWTC_WrDqsGrossDlyByte15_SHIFT 29
+
+#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_0_1 0x42
+#define DDWTCE_WrDqsChkFineDlyByte1_SHIFT 0
+#define DDWTCE_WrDqsChkGrossDlyByte1_SHIFT 5
+
+#define DRAM_DQS_WRITE_TIME_CTRL_1_0 0x33 //DIMM1 Channel A
+#define DRAM_DQS_WRITE_TIME_CTRL_1_1 0x34 //DIMM1 Channel A
+#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_1_0 0x35
+#define DRAM_DQS_WRITE_TIME_CTRL_1_2 0x43 //DIMM1 Channel B
+#define DRAM_DQS_WRITE_TIME_CTRL_1_3 0x44 //DIMM1 Channel B
+#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_1_1 0x45
+#endif
+
+#define DRAM_PHASE_RECOVERY_CTRL_0 0x50
+#define DPRC_PhRecFineDlyByte0_SHIFT 0
+#define DDWTC_PhRecFineDlyByte0_MASK 0x1f
+#define DDWTC_PhRecGrossDlyByte0_SHIFT 5
+#define DDWTC_PhRecGrossDlyByte0_MASK 0x3
+#define DDWTC_PhRecFineDlyByte1_SHIFT 8
+#define DDWTC_PhRecGrossDlyByte1_SHIFT 13
+#define DDWTC_PhRecFineDlyByte2_SHIFT 16
+#define DDWTC_PhRecGrossDlyByte2_SHIFT 21
+#define DDWTC_PhRecFineDlyByte3_SHIFT 24
+#define DDWTC_PhRecGrossDlyByte3_SHIFT 29
+
+#define DRAM_PHASE_RECOVERY_CTRL_1 0x51
+#define DPRC_PhRecFineDlyByte4_SHIFT 0
+#define DDWTC_PhRecGrossDlyByte4_SHIFT 5
+#define DDWTC_PhRecFineDlyByte5_SHIFT 8
+#define DDWTC_PhRecGrossDlyByte5_SHIFT 13
+#define DDWTC_PhRecFineDlyByte6_SHIFT 16
+#define DDWTC_PhRecGrossDlyByte6_SHIFT 21
+#define DDWTC_PhRecFineDlyByte7_SHIFT 24
+#define DDWTC_PhRecGrossDlyByte7_SHIFT 29
+
+#define DRAM_ECC_PHASE_RECOVERY_CTRL 0x52
+#define DEPRC_PhRecEccDlyByte0_SHIFT 0
+#define DEPRC_PhRecEccGrossDlyByte0_SHIFT 5
+
+#define DRAM_WRITE_LEVEL_ERROR 0x53 /* read only */
+#define DWLE_WrLvErr_SHIFT 0
+#define DWLE_WrLvErr_MASK 0xff
+
+#define DRAM_CTRL_MISC 0xa0
+#define DCM_MemCleared (1<<0) /* RD == F2x110 [MemCleared] */
+#define DCM_DramEnabled (1<<9) /* RD == F2x110 [DramEnabled] */
+
+#define NB_TIME_STAMP_COUNT_LOW 0xb0
+#define TscLow_SHIFT 0
+#define TscLow_MASK 0xffffffff
+
+#define NB_TIME_STAMP_COUNT_HIGH 0xb4
+#define TscHigh_SHIFT 0
+#define TscHigh_Mask 0xff
+
+#define DCT_DEBUG_CTRL 0xf0 /* 0xf0 for DCT0, 0x1f0 is for DCT1*/
+#define DDC_DllAdjust_SHIFT 0
+#define DDC_DllAdjust_MASK 0xff
+#define DDC_DllSlower (1<<8)
+#define DDC_DllFaster (1<<9)
+#define DDC_WrtDqsAdjust_SHIFT 16
+#define DDC_WrtDqsAdjust_MASK 0x7
+#define DDC_WrtDqsAdjustEn (1<<19)
+
+#define DRAM_CTRL_SEL_LOW 0x110
+#define DCSL_DctSelHiRngEn (1<<0)
+#define DCSL_DctSelHi (1<<1)
+#define DCSL_DctSelIntLvEn (1<<2)
+#define DCSL_MemClrInit (1<<3) /* WR only */
+#define DCSL_DctGangEn (1<<4)
+#define DCSL_DctDataIntLv (1<<5)
+#define DCSL_DctSelIntLvAddr_SHIFT 6 /* F2x110 bits [7:6] */
+#define DCSL_DctSelIntLvAddr_MASK 3
+#define DCSL_DramEnable (1<<8) /* RD only */
+#define DCSL_MemClrBusy (1<<9) /* RD only */
+#define DCSL_MemCleared (1<<10) /* RD only */
+#define DCSL_DctSelBaseAddr_47_27_SHIFT 11
+#define DCSL_DctSelBaseAddr_47_27_MASK 0x1fffff
+
+#define DRAM_CTRL_SEL_HIGH 0x114
+#define DCSH_DctSelBaseOffset_47_26_SHIFT 10
+#define DCSH_DctSelBaseOffset_47_26_MASK 0x3fffff
+
+#define MEM_CTRL_CONF_LOW 0x118
+#define MCCL_MctPriCpuRd (1<<0)
+#define MCCL_MctPriCpuWr (1<<1)
+#define MCCL_MctPriIsocRd_SHIFT 4
+#define MCCL_MctPriIsoc_MASK 0x3
+#define MCCL_MctPriIsocWr_SHIFT 6
+#define MCCL_MctPriIsocWe_MASK 0x3
+#define MCCL_MctPriDefault_SHIFT 8
+#define MCCL_MctPriDefault_MASK 0x3
+#define MCCL_MctPriWr_SHIFT 10
+#define MCCL_MctPriWr_MASK 0x3
+#define MCCL_MctPriIsoc_SHIFT 12
+#define MCCL_MctPriIsoc_MASK 0x3
+#define MCCL_MctPriTrace_SHIFT 14
+#define MCCL_MctPriTrace_MASK 0x3
+#define MCCL_MctPriScrub_SHIFT 16
+#define MCCL_MctPriScrub_MASK 0x3
+#define MCCL_McqMedPriByPassMax_SHIFT 20
+#define MCCL_McqMedPriByPassMax_MASK 0x7
+#define MCCL_McqHiPriByPassMax_SHIFT 24
+#define MCCL_McqHiPriByPassMax_MASK 0x7
+#define MCCL_MctVarPriCntLmt_SHIFT 28
+#define MCCL_MctVarPriCntLmt_MASK 0x7
+
+#define MEM_CTRL_CONF_HIGH 0x11c
+#define MCCH_DctWrLimit_SHIFT 0
+#define MCCH_DctWrLimit_MASK 0x3
+#define MCCH_MctWrLimit_SHIFT 2
+#define MCCH_MctWrLimit_MASK 0x1f
+#define MCCH_MctPrefReqLimit_SHIFT 7
+#define MCCH_MctPrefReqLimit_MASK 0x1f
+#define MCCH_PrefCpuDis (1<<12)
+#define MCCH_PrefIoDis (1<<13)
+#define MCCH_PrefIoFixStrideEn (1<<14)
+#define MCCH_PrefFixStrideEn (1<<15)
+#define MCCH_PrefFixDist_SHIFT 16
+#define MCCH_PrefFixDist_MASK 0x3
+#define MCCH_PrefConfSat_SHIFT 18
+#define MCCH_PrefConfSat_MASK 0x3
+#define MCCH_PrefOneConf_SHIFT 20
+#define MCCH_PrefOneConf_MASK 0x3
+#define MCCH_PrefTwoConf_SHIFT 22
+#define MCCH_PrefTwoConf_MASK 0x7
+#define MCCH_PrefThreeConf_SHIFT 25
+#define MCCH_prefThreeConf_MASK 0x7
+#define MCCH_PrefDramTrainMode (1<<28)
+#define MCCH_FlushWrOnStpGnt (1<<29)
+#define MCCH_FlushWr (1<<30)
+#define MCCH_MctScrubEn (1<<31)
+
+
+/* Function 3 */
+#define MCA_NB_CONTROL 0x40
+#define MNCT_CorrEccEn (1<<0)
+#define MNCT_UnCorrEccEn (1<<1)
+#define MNCT_CrcErr0En (1<<2) /* Link 0 */
+#define MNCT_CrcErr1En (1<<3)
+#define MNCT_CrcErr2En (1<<4)
+#define MBCT_SyncPkt0En (1<<5) /* Link 0 */
+#define MBCT_SyncPkt1En (1<<6)
+#define MBCT_SyncPkt2En (1<<7)
+#define MBCT_MstrAbrtEn (1<<8)
+#define MBCT_TgtAbrtEn (1<<9)
+#define MBCT_GartTblEkEn (1<<10)
+#define MBCT_AtomicRMWEn (1<<11)
+#define MBCT_WdogTmrRptEn (1<<12)
+#define MBCT_DevErrEn (1<<13)
+#define MBCT_L3ArrayCorEn (1<<14)
+#define MBCT_L3ArrayUncEn (1<<15)
+#define MBCT_HtProtEn (1<<16)
+#define MBCT_HtDataEn (1<<17)
+#define MBCT_DramParEn (1<<18)
+#define MBCT_RtryHt0En (1<<19) /* Link 0 */
+#define MBCT_RtryHt1En (1<<20)
+#define MBCT_RtryHt2En (1<<21)
+#define MBCT_RtryHt3En (1<<22)
+#define MBCT_CrcErr3En (1<<23) /* Link 3*/
+#define MBCT_SyncPkt3En (1<<24) /* Link 4 */
+#define MBCT_McaUsPwDatErrEn (1<<25)
+#define MBCT_NbArrayParEn (1<<26)
+#define MBCT_TblWlkDatErrEn (1<<27)
+#define MBCT_FbDimmCorErrEn (1<<28)
+#define MBCT_FbDimmUnCorErrEn (1<<29)
+
+
+
+#define MCA_NB_CONFIG 0x44
+#define MNC_CpuRdDatErrEn (1<<1)
+#define MNC_SyncOnUcEccEn (1<<2)
+#define MNC_SynvPktGenDis (1<<3)
+#define MNC_SyncPktPropDis (1<<4)
+#define MNC_IoMstAbortDis (1<<5)
+#define MNC_CpuErrDis (1<<6)
+#define MNC_IoErrDis (1<<7)
+#define MNC_WdogTmrDis (1<<8)
+#define MNC_WdogTmrCntSel_2_0_SHIFT 9 /* bit 3 is at F3x180 */
+#define MNC_WdogTmrCntSel_2_0_MASK 0x3
+#define MNC_WdogTmrBaseSel_SHIFT 12
+#define MNC_WdogTmrBaseSel_MASK 0x3
+#define MNC_LdtLinkSel_SHIFT 14
+#define MNC_LdtLinkSel_MASK 0x3
+#define MNC_GenCrcErrByte0 (1<<16)
+#define MNC_GenCrcErrByte1 (1<<17)
+#define MNC_SubLinkSel_SHIFT 18
+#define MNC_SubLinkSel_MASK 0x3
+#define MNC_SyncOnWdogEn (1<<20)
+#define MNC_SyncOnAnyErrEn (1<<21)
+#define MNC_DramEccEn (1<<22)
+#define MNC_ChipKillEccEn (1<<23)
+#define MNC_IoRdDatErrEn (1<<24)
+#define MNC_DisPciCfgCpuErrRsp (1<<25)
+#define MNC_CorrMcaExcEn (1<<26)
+#define MNC_NbMcaToMstCpuEn (1<<27)
+#define MNC_DisTgtAbtCpuErrRsp (1<<28)
+#define MNC_DisMstAbtCpuErrRsp (1<<29)
+#define MNC_SyncOnDramAdrParErrEn (1<<30)
+#define MNC_NbMcaLogEn (1<<31)
+
+#define MCA_NB_STATUS_LOW 0x48
+#define MNSL_ErrorCode_SHIFT 0
+#define MNSL_ErrorCode_MASK 0xffff
+#define MNSL_ErrorCodeExt_SHIFT 16
+#define MNSL_ErrorCodeExt_MASK 0x1f
+#define MNSL_Syndrome_15_8_SHIFT 24
+#define MNSL_Syndrome_15_8_MASK 0xff
+
+#define MCA_NB_STATUS_HIGH 0x4c
+#define MNSH_ErrCPU_SHIFT 0
+#define MNSH_ErrCPU_MASK 0xf
+#define MNSH_LDTLink_SHIFT 4
+#define MNSH_LDTLink_MASK 0xf
+#define MNSH_ErrScrub (1<<8)
+#define MNSH_SubLink (1<<9)
+#define MNSH_McaStatusSubCache_SHIFT 10
+#define MNSH_McaStatusSubCache_MASK 0x3
+#define MNSH_Deffered (1<<12)
+#define MNSH_UnCorrECC (1<<13)
+#define MNSH_CorrECC (1<<14)
+#define MNSH_Syndrome_7_0_SHIFT 15
+#define MNSH_Syndrome_7_0_MASK 0xff
+#define MNSH_PCC (1<<25)
+#define MNSH_ErrAddrVal (1<<26)
+#define MNSH_ErrMiscVal (1<<27)
+#define MNSH_ErrEn (1<<28)
+#define MNSH_ErrUnCorr (1<<29)
+#define MNSH_ErrOver (1<<30)
+#define MNSH_ErrValid (1<<31)
+
+#define MCA_NB_ADDR_LOW 0x50
+#define MNAL_ErrAddr_31_1_SHIFT 1
+#define MNAL_ErrAddr_31_1_MASK 0x7fffffff
+
+#define MCA_NB_ADDR_HIGH 0x54
+#define MNAL_ErrAddr_47_32_SHIFT 0
+#define MNAL_ErrAddr_47_32_MASK 0xffff
+
+#define DRAM_SCRUB_RATE_CTRL 0x58
+#define SCRUB_NONE 0
+#define SCRUB_40ns 1
+#define SCRUB_80ns 2
+#define SCRUB_160ns 3
+#define SCRUB_320ns 4
+#define SCRUB_640ns 5
+#define SCRUB_1_28us 6
+#define SCRUB_2_56us 7
+#define SCRUB_5_12us 8
+#define SCRUB_10_2us 9
+#define SCRUB_20_5us 0xa
+#define SCRUB_41_0us 0xb
+#define SCRUB_81_9us 0xc
+#define SCRUB_163_8us 0xd
+#define SCRUB_327_7us 0xe
+#define SCRUB_655_4us 0xf
+#define SCRUB_1_31ms 0x10
+#define SCRUB_2_62ms 0x11
+#define SCRUB_5_24ms 0x12
+#define SCRUB_10_49ms 0x13
+#define SCRUB_20_97ms 0x14
+#define SCRUB_42ms 0x15
+#define SCRUB_84ms 0x16
+#define DSRC_DramScrub_SHIFT 0
+#define DSRC_DramScrub_MASK 0x1f
+#define DSRC_L2Scrub_SHIFT 8
+#define DSRC_L2Scrub_MASK 0x1f
+#define DSRC_DcacheScrub_SHIFT 16
+#define DSRC_DcacheScrub_MASK 0x1f
+#define DSRC_L3Scrub_SHIFT 24
+#define DSRC_L3Scrub_MASK 0x1f
+
+#define DRAM_SCRUB_ADDR_LOW 0x5C
+#define DSAL_ScrubReDirEn (1<<0)
+#define DSAL_ScrubAddrLo_SHIFT 6
+#define DSAL_ScrubAddrLo_MASK 0x3ffffff
+
+#define DRAM_SCRUB_ADDR_HIGH 0x60
+#define DSAH_ScrubAddrHi_SHIFT 0
+#define DSAH_ScrubAddrHi_MASK 0xffff
+
+#define HW_THERMAL_CTRL 0x64
+
+#define SW_THERMAL_CTRL 0x68
+
+#define DATA_BUF_CNT 0x6c
+
+#define SRI_XBAR_CMD_BUF_CNT 0x70
+
+#define XBAR_SRI_CMD_BUF_CNT 0x74
+
+#define MCT_XBAR_CMD_BUF_CNT 0x78
+
+#define ACPI_PWR_STATE_CTRL 0x80 /* till 0x84 */
+
+#define NB_CONFIG_LOW 0x88
+#define NB_CONFIG_HIGH 0x8c
+
+#define GART_APERTURE_CTRL 0x90
+
+#define GART_APERTURE_BASE 0x94
+
+#define GART_TBL_BASE 0x98
+
+#define GART_CACHE_CTRL 0x9c
+
+#define PWR_CTRL_MISC 0xa0
+
+#define RPT_TEMP_CTRL 0xa4
+
+#define ON_LINE_SPARE_CTRL 0xb0
+
+#define SBI_P_STATE_LIMIT 0xc4
+
+#define CLK_PWR_TIMING_CTRL0 0xd4
+#define CLK_PWR_TIMING_CTRL1 0xd8
+#define CLK_PWR_TIMING_CTRL2 0xdc
+
+#define THERMTRIP_STATUS 0xE4
+
+
+#define NORTHBRIDGE_CAP 0xE8
+#define NBCAP_TwoChanDRAMcap (1 << 0)
+#define NBCAP_DualNodeMPcap (1 << 1)
+#define NBCAP_EightNodeMPcap (1 << 2)
+#define NBCAP_ECCcap (1 << 3)
+#define NBCAP_ChipkillECCcap (1 << 4)
+#define NBCAP_DdrMaxRate_SHIFT 5
+#define NBCAP_DdrMaxRate_MASK 7
+#define NBCAP_DdrMaxRate_400 7
+#define NBCAP_DdrMaxRate_533 6
+#define NBCAP_DdrMaxRate_667 5
+#define NBCAP_DdrMaxRate_800 4
+#define NBCAP_DdrMaxRate_1067 3
+#define NBCAP_DdrMaxRate_1333 2
+#define NBCAP_DdrMaxRate_1600 1
+#define NBCAP_DdrMaxRate_3_2G 6
+#define NBCAP_DdrMaxRate_4_0G 5
+#define NBCAP_DdrMaxRate_4_8G 4
+#define NBCAP_DdrMaxRate_6_4G 3
+#define NBCAP_DdrMaxRate_8_0G 2
+#define NBCAP_DdrMaxRate_9_6G 1
+#define NBCAP_Mem_ctrl_cap (1 << 8)
+#define MBCAP_SVMCap (1<<9)
+#define NBCAP_HtcCap (1<<10)
+#define NBCAP_CmpCap_SHIFT 12
+#define NBCAP_CmpCap_MASK 3
+#define NBCAP_MpCap_SHIFT 16
+#define NBCAP_MpCap_MASK 7
+#define NBCAP_MpCap_1N 7
+#define NBCAP_MpCap_2N 6
+#define NBCAP_MpCap_4N 5
+#define NBCAP_MpCap_8N 4
+#define NBCAP_MpCap_32N 0
+#define NBCAP_UnGangEn_SHIFT 20
+#define NBCAP_UnGangEn_MASK 0xf
+#define NBCAP_L3Cap (1<<25)
+#define NBCAP_HtAcCap (1<<26)
+
+/* 04/04/2006 18:00 */
+
+#define EXT_NB_MCA_CTRL 0x180
+
+#define NB_EXT_CONF 0x188
+#define DOWNCORE_CTRL 0x190
+#define DWNCC_DisCore_SHIFT 0
+#define DWNCC_DisCore_MASK 0xf
+
+/* Function 5 for FBDIMM */
+#define FBD_DRAM_TIMING_LOW
+
+#define LinkConnected (1 << 0)
+#define InitComplete (1 << 1)
+#define NonCoherent (1 << 2)
+#define ConnectionPending (1 << 4)
+
+
+#include "amdfam10_nums.h"
+
+#ifdef __ROMCC__
+#if NODE_NUMS==64
+ #define NODE_PCI(x, fn) ((x<32)?(PCI_DEV(CBB,(CDB+x),fn)):(PCI_DEV((CBB-1),(CDB+x-32),fn)))
+#else
+ #define NODE_PCI(x, fn) PCI_DEV(CBB,(CDB+x),fn)
+#endif
+#endif
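A minimal usage sketch (editor's illustration, not from this commit) of how NODE_PCI() would typically be used to reach a node's PCI functions; "node" and "nbcap" are hypothetical locals, and NORTHBRIDGE_CAP / NBCAP_ECCcap are the definitions above:

	u32 nbcap = pci_read_config32(NODE_PCI(node, 3), NORTHBRIDGE_CAP);
	if (nbcap & NBCAP_ECCcap) {
		/* this node's memory controller reports ECC capability */
	}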
+
+#include "raminit.h"
+
+#if CONFIG_AMDMCT == 0
+
+//struct definitions
+
+struct dimm_size {
+	u8 per_rank; // it is rows + col + bank_lines + data lines
+ u8 rows;
+ u8 col;
+ u8 bank; //1, 2, 3 mean 2, 4, 8
+ u8 rank;
+} __attribute__((packed));
+
+struct mem_info { // pernode
+ u32 dimm_mask;
+ struct dimm_size sz[DIMM_SOCKETS*2]; // for ungang support
+ u32 x4_mask;
+ u32 x16_mask;
+ u32 single_rank_mask;
+ u32 page_1k_mask;
+// u32 ecc_mask;
+// u32 registered_mask;
+ u8 is_opteron;
+	u8 is_registered; //don't support mixing on the same channel or between channels
+	u8 is_ecc; //don't support mixing on the same channel or between channels
+ u8 is_Width128;
+ u8 memclk_set; // we need to use this to retrieve the mem param, all dimms need to work at same freq for one node
+ u8 is_cs_interleaved[2]; //cs
+ u8 rsv[1];
+} __attribute__((packed));
+#else
+#include "../amdmct/mct/mct_d.h"
+#endif
+
+struct link_pair_t {
+ device_t udev;
+ u32 upos;
+ u32 uoffs;
+ device_t dev;
+ u32 pos;
+ u32 offs;
+ u8 host;
+ u8 nodeid;
+ u8 linkn;
+ u8 rsv;
+} __attribute__((packed));
+
+struct nodes_info_t {
+ u32 nodes_in_group; // could be 2, 3, 4, 5, 6, 7, 8
+ u32 groups_in_plane; // could be 1, 2, 3, 4, 5
+ u32 planes; // could be 1, 2
+ u32 up_planes; // down planes will be [up_planes, planes)
+} __attribute__((packed));
+
+/* be careful with the alignment of sysinfo, because sysinfo may be shared by the linuxbios_car and linuxbios_ram stages, and linuxbios_ram may be running in 64-bit mode later.*/
+#if CONFIG_AMDMCT == 0
+
+//#define MEM_CS_COPY 1
+#define MEM_CS_COPY NODE_NUMS
+
+#if MEM_TRAIN_SEQ == 0
+ #define DQS_DELAY_COPY NODE_NUMS
+#else
+// #define DQS_DELAY_COPY 1
+ #define DQS_DELAY_COPY NODE_NUMS
+#endif
+#endif
+
+
+struct sys_info {
+ int32_t needs_reset;
+
+ u8 ln[NODE_NUMS*NODE_NUMS];// [0, 3] link n, [4, 7] will be hop num
+	u16 ln_tn[NODE_NUMS*8]; // for 0x0zzz: bits [0,7] target node num, bits [8,11] response link from target node; 0x80ff means not initialized, 0x4yyy means non-coherent and yyy is the link pair index
+ struct nodes_info_t nodes_info;
+ u32 nodes;
+
+ u8 host_link_freq[NODE_NUMS*8]; // record freq for every link from cpu, 0x0f means don't need to touch it
+ u16 host_link_freq_cap[NODE_NUMS*8]; //cap
+
+ u32 segbit;
+ u32 sbdn;
+ u32 sblk;
+ u32 sbbusn;
+
+ u32 ht_c_num;
+ u32 ht_c_conf_bus[HC_NUMS]; // 4-->32
+
+	struct link_pair_t link_pair[HC_NUMS*4];// enough? non-coherent only: 32 chains and every chain has 4 HT devices
+ u32 link_pair_num;
+
+ struct mem_controller ctrl[NODE_NUMS];
+
+#if CONFIG_AMDMCT
+// sMCTStruct MCTData;
+// sDCTStruct *DCTNodeData[NODE_NUMS];
+// sDCTStruct DCTNodeData_a[NODE_NUMS];
+ struct MCTStatStruc MCTstat;
+ struct DCTStatStruc DCTstatA[NODE_NUMS];
+#else
+
+ u8 ctrl_present[NODE_NUMS];
+ struct mem_info meminfo[NODE_NUMS];
+ u8 mem_trained[NODE_NUMS]; //0: no dimm, 1: trained, 0x80: not started, 0x81: recv1 fail, 0x82: Pos Fail, 0x83:recv2 fail
+ u32 tom_m;
+ u32 tom2_m;
+
+	//if we are getting tight on global space, we may need to squeeze the following into one copy
+ u32 mem_base[MEM_CS_COPY][2]; // two dct
+ u32 cs_base[MEM_CS_COPY][2][8]; //8 cs_idx
+ u32 hole_startk; // 0 mean hole
+
+ u8 dqs_delay_a[DQS_DELAY_COPY*2*4*2*9]; //8 node, channel 2, dimm 4, direction 2 , bytelane *9
+ u8 dqs_rcvr_dly_a[DQS_DELAY_COPY*2*4*9]; //8 node, channel 2, dimm 4, bytelane *9
+ u8 dqs_rcvr_dly_a_1[9]; //8 node, channel 2, dimm 4, bytelane *9
+#endif
+
+} __attribute__((packed));
+
+#if CONFIG_AMDMCT == 0
+
+#ifdef __ROMCC__
+static void soft_reset(void);
+#endif
+static void wait_all_core0_mem_trained(struct sys_info *sysinfo)
+{
+ int i;
+ u32 mask_lo = 0;
+ u32 mask_hi = 0;
+ unsigned needs_reset = 0;
+
+ if(sysinfo->nodes == 1) return; // in case only one cpu installed
+ for(i=1; i<sysinfo->nodes; i++) {
+ /* Skip everything if I don't have any memory on this controller */
+ if(sysinfo->mem_trained[i]==0x00) continue;
+
+ if(i<32) {
+ mask_lo |= (1<<i);
+ } else {
+ mask_hi |= (1<<(i-32));
+ }
+ }
+
+ i = 1;
+ while(1) {
+ if(i<32) {
+ if(mask_lo & (1<<i)) {
+ if(sysinfo->mem_trained[i] != 0x80) {
+ mask_lo &= ~(1<<i);
+ }
+ }
+ } else {
+ if(mask_hi & (1<<(i-32))) {
+ if(sysinfo->mem_trained[i] != 0x80) {
+ mask_hi &= ~(1<<(i-32));
+ }
+ }
+ }
+
+ if((!mask_lo) && (!mask_hi)) break;
+
+ i++;
+ i%=sysinfo->nodes;
+ }
+
+ for(i=0; i<sysinfo->nodes; i++) {
+#ifdef __ROMCC__
+ print_debug("mem_trained["); print_debug_hex8(i); print_debug("]="); print_debug_hex8(sysinfo->mem_trained[i]); print_debug("\n");
+#else
+ printk_debug("mem_trained[%02x]=%02x\n", i, sysinfo->mem_trained[i]);
+#endif
+ switch(sysinfo->mem_trained[i]) {
+ case 0: //don't need train
+ case 1: //trained
+ break;
+ case 0x81: //recv1: fail
+ case 0x82: //Pos :fail
+ case 0x83: //recv2: fail
+ needs_reset = 1;
+ break;
+ }
+ }
+ if(needs_reset) {
+#ifdef __ROMCC__
+ print_debug("mem trained failed\n");
+ soft_reset();
+#else
+ printk_debug("mem trained failed\n");
+ hard_reset();
+#endif
+ }
+
+}
+
+#endif
+
+#endif /* AMDFAM10_H */
diff --git a/src/northbridge/amd/amdfam10/amdfam10_acpi.c b/src/northbridge/amd/amdfam10/amdfam10_acpi.c
new file mode 100644
index 0000000000..3c7f16b625
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/amdfam10_acpi.c
@@ -0,0 +1,382 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <console/console.h>
+#include <string.h>
+#include <arch/acpi.h>
+#include <device/pci.h>
+#include <cpu/x86/msr.h>
+#include <cpu/amd/mtrr.h>
+#include <cpu/amd/amdfam10_sysconf.h>
+#include "amdfam10.h"
+
+//it seems some functions can be moved to arch/i386/boot/acpi.c
+
+unsigned long acpi_create_madt_lapics(unsigned long current)
+{
+ device_t cpu;
+ int cpu_index = 0;
+
+ for(cpu = all_devices; cpu; cpu = cpu->next) {
+ if ((cpu->path.type != DEVICE_PATH_APIC) ||
+ (cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER)) {
+ continue;
+ }
+ if (!cpu->enabled) {
+ continue;
+ }
+ current += acpi_create_madt_lapic((acpi_madt_lapic_t *)current, cpu_index, cpu->path.u.apic.apic_id);
+ cpu_index++;
+ }
+ return current;
+}
+
+unsigned long acpi_create_madt_lapic_nmis(unsigned long current, u16 flags, u8 lint)
+{
+ device_t cpu;
+ int cpu_index = 0;
+
+ for(cpu = all_devices; cpu; cpu = cpu->next) {
+ if ((cpu->path.type != DEVICE_PATH_APIC) ||
+ (cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER)) {
+ continue;
+ }
+ if (!cpu->enabled) {
+ continue;
+ }
+ current += acpi_create_madt_lapic_nmi((acpi_madt_lapic_nmi_t *)current, cpu_index, flags, lint);
+ cpu_index++;
+ }
+ return current;
+}
+
+
+unsigned long acpi_create_srat_lapics(unsigned long current)
+{
+ device_t cpu;
+ int cpu_index = 0;
+
+ for(cpu = all_devices; cpu; cpu = cpu->next) {
+ if ((cpu->path.type != DEVICE_PATH_APIC) ||
+ (cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER)) {
+ continue;
+ }
+ if (!cpu->enabled) {
+ continue;
+ }
+ printk_debug("SRAT: lapic cpu_index=%02x, node_id=%02x, apic_id=%02x\n", cpu_index, cpu->path.u.apic.node_id, cpu->path.u.apic.apic_id);
+ current += acpi_create_srat_lapic((acpi_srat_lapic_t *)current, cpu->path.u.apic.node_id, cpu->path.u.apic.apic_id);
+ cpu_index++;
+ }
+ return current;
+}
+
+static unsigned long resk(uint64_t value)
+{
+ unsigned long resultk;
+ if (value < (1ULL << 42)) {
+ resultk = value >> 10;
+ } else {
+ resultk = 0xffffffff;
+ }
+ return resultk;
+}
+
+
+struct acpi_srat_mem_state {
+ unsigned long current;
+};
+
+void set_srat_mem(void *gp, struct device *dev, struct resource *res)
+{
+ struct acpi_srat_mem_state *state = gp;
+ unsigned long basek, sizek;
+ basek = resk(res->base);
+ sizek = resk(res->size);
+
+ printk_debug("set_srat_mem: dev %s, res->index=%04x startk=%08x, sizek=%08x\n",
+ dev_path(dev), res->index, basek, sizek);
+	/*
+	0-640K must be on node 0
+	the next range starts at 1M,
+	so cut off anything below 1M from this mem range
+	*/
+ if((basek+sizek)<1024) return;
+
+ if(basek<1024) {
+ sizek -= 1024 - basek;
+ basek = 1024;
+ }
+
+ // need to figure out NV
+ state->current += acpi_create_srat_mem((acpi_srat_mem_t *)state->current, (res->index & 0xf), basek, sizek, 1);
+}
+
+
+unsigned long acpi_fill_srat(unsigned long current)
+{
+ struct acpi_srat_mem_state srat_mem_state;
+
+ /* create all subtables for processors */
+ current = acpi_create_srat_lapics(current);
+
+	/* create all subtables for memory ranges */
+
+ /* 0-640K must be on node 0 */
+ current += acpi_create_srat_mem((acpi_srat_mem_t *)current, 0, 0, 640, 1);//enable
+
+ srat_mem_state.current = current;
+ search_global_resources(
+ IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
+ set_srat_mem, &srat_mem_state);
+
+ current = srat_mem_state.current;
+ return current;
+}
+
+unsigned long acpi_fill_slit(unsigned long current)
+{
+	/* need to find out the node count first */
+	/* fill the first 8 bytes with that count */
+	/* fill the next num*num bytes with distances: local is 10, 1 hop means 20, 2 hops mean 30, ... */
+
+ struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE);
+ u8 *ln = sysinfox->ln;
+
+
+ u8 *p = (u8 *)current;
+ int nodes = sysconf.nodes;
+ int i,j;
+ u32 hops;
+
+ memset(p, 0, 8+nodes*nodes);
+ *p = (u8) nodes;
+ p += 8;
+
+ for(i=0;i<nodes;i++) {
+ for(j=0;j<nodes; j++) {
+ if(i==j) {
+ p[i*nodes+j] = 10;
+ } else {
+ hops = (((ln[i*NODE_NUMS+j]>>4) & 0x7)+1);
+ p[i*nodes+j] = hops * 2 + 10;
+ }
+ }
+ }
+
+ current += 8+nodes*nodes;
+ return current;
+}
+
+
+// moved from mb acpi_tables.c
+static void intx_to_stream(u32 val, u32 len, u8 *dest)
+{
+ int i;
+ for(i=0;i<len;i++) {
+ *(dest+i) = (val >> (8*i)) & 0xff;
+ }
+}
+
+
+static void int_to_stream(u32 val, u8 *dest)
+{
+	intx_to_stream(val, 4, dest);
+}
+
+
+// used by acpi_tables.h
+void update_ssdt(void *ssdt)
+{
+ u8 *BUSN;
+ u8 *MMIO;
+ u8 *PCIO;
+ u8 *SBLK;
+ u8 *TOM1;
+ u8 *SBDN;
+ u8 *HCLK;
+ u8 *HCDN;
+ u8 *CBST;
+ u8 *CBBX;
+ u8 *CBS2;
+ u8 *CBB2;
+
+
+ int i;
+ u32 dword;
+ msr_t msr;
+
+	// the offsets could differ for different HC_NUMS, HC_POSSIBLE_NUM, and ssdt.asl layouts
+ BUSN = ssdt+0x3b; //+5 will be next BUSN
+ MMIO = ssdt+0xe4; //+5 will be next MMIO
+ PCIO = ssdt+0x36d; //+5 will be next PCIO
+ SBLK = ssdt+0x4b2; // one byte
+ TOM1 = ssdt+0x4b9; //
+ SBDN = ssdt+0x4c3;//
+ HCLK = ssdt+0x4d1; //+5 will be next HCLK
+ HCDN = ssdt+0x57a; //+5 will be next HCDN
+ CBBX = ssdt+0x61f; //
+ CBST = ssdt+0x626;
+ CBB2 = ssdt+0x62d; //
+ CBS2 = ssdt+0x634;
+
+ for(i=0;i<HC_NUMS;i++) {
+ dword = sysconf.ht_c_conf_bus[i];
+ int_to_stream(dword, BUSN+i*5);
+ }
+
+	for(i=0;i<(HC_NUMS*2);i++) { // FIXME: extend to more chains
+ dword = sysconf.conf_mmio_addrx[i]; //base
+ int_to_stream(dword, MMIO+(i*2)*5);
+ dword = sysconf.conf_mmio_addr[i]; //mask
+ int_to_stream(dword, MMIO+(i*2+1)*5);
+ }
+	for(i=0;i<HC_NUMS;i++) { // FIXME: extend to more chains
+ dword = sysconf.conf_io_addrx[i];
+ int_to_stream(dword, PCIO+(i*2)*5);
+ dword = sysconf.conf_io_addr[i];
+ int_to_stream(dword, PCIO+(i*2+1)*5);
+ }
+
+ *SBLK = (u8)(sysconf.sblk);
+
+ msr = rdmsr(TOP_MEM);
+ int_to_stream(msr.lo, TOM1);
+
+ int_to_stream(sysconf.sbdn, SBDN);
+
+ for(i=0;i<sysconf.hc_possible_num;i++) {
+ int_to_stream(sysconf.pci1234[i], HCLK + i*5);
+ int_to_stream(sysconf.hcdn[i], HCDN + i*5);
+ }
+ for(i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8
+ int_to_stream(0x00000000, HCLK + i*5);
+ int_to_stream(0x20202020, HCDN + i*5);
+ }
+
+ *CBBX = (u8)(CBB);
+
+ if(CBB == 0xff) {
+ *CBST = (u8) (0x0f);
+ } else {
+ if((sysconf.pci1234[0] >> 12) & 0xff) { //sb chain on other than bus 0
+ *CBST = (u8) (0x0f);
+ }
+ else {
+ *CBST = (u8) (0x00);
+ }
+ }
+
+ if((CBB == 0xff) && (sysconf.nodes>32)) {
+ *CBS2 = 0x0f;
+ *CBB2 = (u8)(CBB-1);
+ } else {
+ *CBS2 = 0x00;
+ *CBB2 = 0x00;
+ }
+
+}
+
+
+void update_sspr(void *sspr, u32 nodeid, u32 cpuindex)
+{
+ u8 *CPU;
+ u8 *CPUIN;
+ u8 *COREFREQ;
+ u8 *POWER;
+ u8 *TRANSITION_LAT;
+ u8 *BUSMASTER_LAT;
+ u8 *CONTROL;
+ u8 *STATUS;
+ unsigned offset = 0x94 - 0x7f;
+ int i;
+
+ CPU = sspr + 0x38;
+ CPUIN = sspr + 0x3a;
+
+ COREFREQ = sspr + 0x7f; //2 byte
+ POWER = sspr + 0x82; //3 bytes
+ TRANSITION_LAT = sspr + 0x87; //two bytes
+ BUSMASTER_LAT = sspr + 0x8a; //two bytes
+ CONTROL = sspr + 0x8d;
+ STATUS = sspr + 0x8f;
+
+ sprintf(CPU, "%02x", (u8)cpuindex);
+ *CPUIN = (u8) cpuindex;
+
+ for(i=0;i<sysconf.p_state_num;i++) {
+ struct p_state_t *p_state = &sysconf.p_state[nodeid * 5 + i];
+		intx_to_stream(p_state->corefreq, 2, COREFREQ + i*offset);
+		intx_to_stream(p_state->power, 3, POWER + i*offset);
+		intx_to_stream(p_state->transition_lat, 2, TRANSITION_LAT + i*offset);
+		intx_to_stream(p_state->busmaster_lat, 2, BUSMASTER_LAT + i*offset);
+ *((u8 *)(CONTROL + i*offset)) =(u8) p_state->control;
+ *((u8 *)(STATUS + i*offset)) =(u8) p_state->status;
+ }
+}
+
+extern unsigned char AmlCode_sspr5[];
+extern unsigned char AmlCode_sspr4[];
+extern unsigned char AmlCode_sspr3[];
+extern unsigned char AmlCode_sspr2[];
+extern unsigned char AmlCode_sspr1[];
+
+/* fixme: find a good way to handle different p_state_num values */
+unsigned long acpi_add_ssdt_pstates(acpi_rsdt_t *rsdt, unsigned long current)
+{
+ device_t cpu;
+ int cpu_index = 0;
+
+ acpi_header_t *ssdt;
+
+ if(!sysconf.p_state_num) return current;
+
+ u8 *AmlCode_sspr;
+ switch(sysconf.p_state_num) {
+ case 1: AmlCode_sspr = AmlCode_sspr1; break;
+ case 2: AmlCode_sspr = AmlCode_sspr2; break;
+ case 3: AmlCode_sspr = AmlCode_sspr3; break;
+ case 4: AmlCode_sspr = AmlCode_sspr4; break;
+ default: AmlCode_sspr = AmlCode_sspr5; break;
+ }
+
+ for(cpu = all_devices; cpu; cpu = cpu->next) {
+ if ((cpu->path.type != DEVICE_PATH_APIC) ||
+ (cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER)) {
+ continue;
+ }
+ if (!cpu->enabled) {
+ continue;
+ }
+ printk_debug("ACPI: pstate cpu_index=%02x, node_id=%02x, core_id=%02x\n", cpu_index, cpu->path.u.apic.node_id, cpu->path.u.apic.core_id);
+
+ current = ( current + 0x0f) & -0x10;
+ ssdt = (acpi_header_t *)current;
+ current += ((acpi_header_t *)AmlCode_sspr)->length;
+ memcpy((void *)ssdt, (void *)AmlCode_sspr, ((acpi_header_t *)AmlCode_sspr)->length);
+ update_sspr((void*)ssdt,cpu->path.u.apic.node_id, cpu_index);
+ /* recalculate checksum */
+ ssdt->checksum = 0;
+ ssdt->checksum = acpi_checksum((unsigned char *)ssdt,ssdt->length);
+ acpi_add_table(rsdt,ssdt);
+
+ cpu_index++;
+ }
+ return current;
+}
diff --git a/src/northbridge/amd/amdfam10/amdfam10_conf.c b/src/northbridge/amd/amdfam10/amdfam10_conf.c
new file mode 100644
index 0000000000..79d532fa3b
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/amdfam10_conf.c
@@ -0,0 +1,874 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if defined(__ROMCC__)
+typedef struct sys_info sys_info_conf_t;
+#else
+typedef struct amdfam10_sysconf_t sys_info_conf_t;
+#endif
+
+struct dram_base_mask_t {
+ u32 base; //[47:27] at [28:8]
+ u32 mask; //[47:27] at [28:8] and enable at bit 0
+};
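A minimal sketch (editor's illustration, not from this commit) of how the packed [47:27]-at-[28:8] encoding described above can be expanded back to a byte address; dram_base_addr() is a hypothetical helper (bit 8 of the packed field holds address bit 27, hence the shift by 19):

	static inline uint64_t dram_base_addr(struct dram_base_mask_t d)
	{
		/* keep bits [28:8] of the packed field and move them up to address bits [47:27] */
		return ((uint64_t)(d.base & 0x1fffff00)) << 19;
	}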
+
+static struct dram_base_mask_t get_dram_base_mask(u32 nodeid)
+{
+ device_t dev;
+ struct dram_base_mask_t d;
+#if defined(__ROMCC__)
+ dev = PCI_DEV(CBB, CDB, 1);
+#else
+ dev = __f1_dev[0];
+#endif
+
+#if EXT_CONF_SUPPORT == 1
+	// use the extended config space only, for simplicity
+ pci_write_config32(dev, 0x110, nodeid | (1<<28)); // [47:27] at [28:8]
+ d.mask = pci_read_config32(dev, 0x114); // enable is bit 0
+ pci_write_config32(dev, 0x110, nodeid | (0<<28));
+ d.base = pci_read_config32(dev, 0x114) & 0x1fffff00; //[47:27] at [28:8];
+#else
+ u32 temp;
+ temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
+ d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
+ temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
+ d.mask |= temp<<21;
+
+ temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
+ d.mask |= (temp & 1); // enable bit
+
+	d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24] too
+ temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
+ d.base |= temp<<21;
+#endif
+ return d;
+}
+
+static void set_dram_base_mask(u32 nodeid, struct dram_base_mask_t d, u32 nodes)
+{
+ u32 i;
+ device_t dev;
+#if EXT_CONF_SUPPORT == 1
+	// use the extended config space only, to keep this simple
+ u32 d_base_i, d_base_d, d_mask_i, d_mask_d;
+ d_base_i = nodeid | (0<<28);
+ d_base_d = d.base | nodeid; //[47:27] at [28:8];
+ d_mask_i = nodeid | (1<<28); // [47:27] at [28:8]
+ d_mask_d = d.mask; // enable is bit 0
+
+#else
+ u32 d_base_lo, d_base_hi, d_mask_lo, d_mask_hi;
+ u32 d_base_lo_reg, d_base_hi_reg, d_mask_lo_reg, d_mask_hi_reg;
+ d_mask_lo = (((d.mask<<(8+3))|(0x07<<16)) & 0xffff0000)|nodeid; // need to fill DramMask[26:24] with ones
+ d_mask_hi = (d.mask>>21) & 0xff;
+ d_base_lo = ((d.base<<(8+3)) & 0xffff0000);
+ if(d.mask & 1) d_base_lo |= 3;
+ d_base_hi = (d.base>>21) & 0xff;
+ d_mask_lo_reg = 0x44+(nodeid<<3);
+ d_mask_hi_reg = 0x144+(nodeid<<3);
+ d_base_lo_reg = 0x40+(nodeid<<3);
+ d_base_hi_reg = 0x140+(nodeid<<3);
+#endif
+
+ for(i=0;i<nodes;i++) {
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+#else
+ dev = __f1_dev[i];
+#endif
+
+#if EXT_CONF_SUPPORT == 1
+		// use the extended config space only, to keep this simple
+ pci_write_config32(dev, 0x110, d_base_i);
+ pci_write_config32(dev, 0x114, d_base_d); //[47:27] at [28:8];
+ pci_write_config32(dev, 0x110, d_mask_i); // [47:27] at [28:8]
+ pci_write_config32(dev, 0x114, d_mask_d); // enable is bit 0
+#else
+ pci_write_config32(dev, d_mask_lo_reg, d_mask_lo); // need to fill DramMask[26:24] with ones
+ pci_write_config32(dev, d_mask_hi_reg, d_mask_hi);
+ pci_write_config32(dev, d_base_lo_reg, d_base_lo);
+ pci_write_config32(dev, d_base_hi_reg, d_base_hi);
+#endif
+ }
+
+#if defined(__ROMCC__)
+ dev = NODE_PCI(nodeid, 1);
+#else
+ dev = __f1_dev[nodeid];
+#endif
+ pci_write_config32(dev, 0x120, d.base>>8);
+ pci_write_config32(dev, 0x124, d.mask>>8);
+
+}
+
+
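+/*
+ * sel_m appears to be the DctSelBaseAddr expressed in MiB (hoist_memory()
+ * below adjusts it by carry_over>>10); the shift places address bits
+ * [47:27] of sel_m<<20 into the DctSelBaseAddr register field.
+ */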
+static void set_DctSelBaseAddr(u32 i, u32 sel_m)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 dcs_lo;
+ dcs_lo = pci_read_config32(dev, DRAM_CTRL_SEL_LOW);
+ dcs_lo &= ~(DCSL_DctSelBaseAddr_47_27_MASK<<DCSL_DctSelBaseAddr_47_27_SHIFT);
+ dcs_lo |= (sel_m<<(20+DCSL_DctSelBaseAddr_47_27_SHIFT-27));
+ pci_write_config32(dev, DRAM_CTRL_SEL_LOW, dcs_lo);
+
+}
+
+
+static u32 get_DctSelBaseAddr(u32 i)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 sel_m;
+ u32 dcs_lo;
+ dcs_lo = pci_read_config32(dev, DRAM_CTRL_SEL_LOW);
+ dcs_lo &= DCSL_DctSelBaseAddr_47_27_MASK<<DCSL_DctSelBaseAddr_47_27_SHIFT;
+ sel_m = dcs_lo>>(20+DCSL_DctSelBaseAddr_47_27_SHIFT-27);
+ return sel_m;
+}
+
+
+static void set_DctSelHiEn(u32 i, u32 val)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 dcs_lo;
+ dcs_lo = pci_read_config32(dev, DRAM_CTRL_SEL_LOW);
+ dcs_lo &= ~(7);
+ dcs_lo |= (val & 7);
+ pci_write_config32(dev, DRAM_CTRL_SEL_LOW, dcs_lo);
+
+}
+
+static u32 get_DctSelHiEn(u32 i)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 dcs_lo;
+ dcs_lo = pci_read_config32(dev, DRAM_CTRL_SEL_LOW);
+ dcs_lo &= 7;
+ return dcs_lo;
+
+}
+
+static void set_DctSelBaseOffset(u32 i, u32 sel_off_m)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 dcs_hi;
+ dcs_hi = pci_read_config32(dev, DRAM_CTRL_SEL_HIGH);
+ dcs_hi &= ~(DCSH_DctSelBaseOffset_47_26_MASK<<DCSH_DctSelBaseOffset_47_26_SHIFT);
+ dcs_hi |= sel_off_m<<(20+DCSH_DctSelBaseOffset_47_26_SHIFT-26);
+ pci_write_config32(dev, DRAM_CTRL_SEL_HIGH, dcs_hi);
+
+}
+
+static u32 get_DctSelBaseOffset(u32 i)
+{
+ device_t dev;
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 2);
+#else
+ dev = __f2_dev[i];
+#endif
+ u32 sel_off_m;
+ u32 dcs_hi;
+ dcs_hi = pci_read_config32(dev, DRAM_CTRL_SEL_HIGH);
+ dcs_hi &= DCSH_DctSelBaseOffset_47_26_MASK<<DCSH_DctSelBaseOffset_47_26_SHIFT;
+ sel_off_m = dcs_hi>>(20+DCSH_DctSelBaseOffset_47_26_SHIFT-26);
+ return sel_off_m;
+}
+#if CONFIG_AMDMCT == 0
+
+static u32 get_one_DCT(struct mem_info *meminfo)
+{
+ u32 one_DCT = 1;
+ if(meminfo->is_Width128) {
+ one_DCT = 1;
+ } else {
+ u32 dimm_mask = meminfo->dimm_mask;
+ if((dimm_mask >> DIMM_SOCKETS) && (dimm_mask & ((1<<DIMM_SOCKETS)-1))) {
+ one_DCT = 0;
+ }
+ }
+
+ return one_DCT;
+}
+#endif
+#if HW_MEM_HOLE_SIZEK != 0
+
+static u32 hoist_memory(u32 hole_startk, u32 i, u32 one_DCT, u32 nodes)
+{
+ u32 ii;
+ u32 carry_over;
+ device_t dev;
+ struct dram_base_mask_t d;
+ u32 sel_m;
+ u32 sel_hi_en;
+ u32 hoist;
+
+
+ carry_over = (4*1024*1024) - hole_startk;
+
+ for(ii=nodes - 1;ii>i;ii--) {
+ d = get_dram_base_mask(ii);
+ if(!(d.mask & 1)) continue;
+ d.base += (carry_over>>9);
+ d.mask += (carry_over>>9);
+ set_dram_base_mask(ii, d, nodes);
+
+ if(get_DctSelHiEn(ii) & 1) {
+ sel_m = get_DctSelBaseAddr(ii);
+ sel_m += carry_over>>10;
+ set_DctSelBaseAddr(ii, sel_m);
+ }
+
+ }
+ d = get_dram_base_mask(i);
+ d.mask += (carry_over>>9);
+ set_dram_base_mask(i,d, nodes);
+#if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+#else
+ dev = __f1_dev[i];
+#endif
+ sel_hi_en = get_DctSelHiEn(i);
+ if(sel_hi_en & 1) {
+ sel_m = get_DctSelBaseAddr(i);
+ }
+ if(d.base == (hole_startk>>9)) {
+		// no need to set the memhole here, because the hole offset would be 0 (overflow);
+		// so change the base reg instead; the new basek will be 4*1024*1024
+ d.base = (4*1024*1024)>>9;
+ set_dram_base_mask(i, d, nodes);
+
+ if(sel_hi_en & 1) {
+ sel_m += carry_over>>10;
+ set_DctSelBaseAddr(i, sel_m);
+ }
+ } else {
+ hoist = /* hole start address */
+ ((hole_startk << 10) & 0xff000000) +
+ /* enable */
+ 1;
+ if(one_DCT||(sel_m>=(hole_startk>>10))) { //one DCT or hole in DCT0
+ hoist +=
+ /* hole address to memory controller address */
+ ((((d.base<<9) + carry_over) >> 6) & 0x0000ff00) ;
+
+ if(sel_hi_en & 1) {
+ sel_m += (carry_over>>10);
+ set_DctSelBaseAddr(i, sel_m);
+ set_DctSelBaseOffset(i, sel_m);
+ }
+ } else { // hole in DCT1 range
+ hoist +=
+ /* hole address to memory controller address */
+ ((((sel_m<<10) + carry_over) >> 6) & 0x0000ff00) ;
+ // don't need to update DctSelBaseAddr
+ if(sel_hi_en & 1) {
+ set_DctSelBaseOffset(i, sel_m);
+ }
+ }
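+		/* e.g. hole_startk == 0x380000 (a 3.5GB hole start) on node 0 with
+		 * d.base == 0 and one DCT gives carry_over == 0x80000 (KB) and
+		 * hoist == 0xe0002001: base 0xe0 at [31:24], offset 0x20 at [15:8],
+		 * enable at bit 0 -- illustrative values only.
+		 */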
+ pci_write_config32(dev, 0xf0, hoist);
+
+ }
+
+ return carry_over;
+}
+#endif
+
+
+#if EXT_CONF_SUPPORT
+static void set_addr_map_reg_4_6_in_one_node(u32 nodeid, u32 cfg_map_dest,
+ u32 busn_min, u32 busn_max,
+ u32 type)
+{
+ device_t dev;
+ u32 i;
+ u32 tempreg;
+ u32 index_min, index_max;
+ u32 dest_min, dest_max;
+ index_min = busn_min>>2; dest_min = busn_min - (index_min<<2);
+ index_max = busn_max>>2; dest_max = busn_max - (index_max<<2);
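+	// e.g. busn_min == 5 selects extended map register index 1, byte 1
+	// (5>>2 == 1, 5 - (1<<2) == 1) -- an illustrative case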
+
+	// three cases: index_min == index_max; index_min + 1 == index_max; index_min + 1 < index_max
+#if defined(__ROMCC__)
+ dev = NODE_PCI(nodeid, 1);
+#else
+ dev = __f1_dev[nodeid];
+#endif
+ if(index_min== index_max) {
+ pci_write_config32(dev, 0x110, index_min | (type<<28));
+ tempreg = pci_read_config32(dev, 0x114);
+ for(i=dest_min; i<=dest_max; i++) {
+ tempreg &= ~(0xff<<(i*8));
+ tempreg |= (cfg_map_dest<<(i*8));
+ }
+		pci_write_config32(dev, 0x110, index_min | (type<<28)); // re-select the index before writing (may be redundant)
+ pci_write_config32(dev, 0x114, tempreg);
+ } else if(index_min<index_max) {
+ pci_write_config32(dev, 0x110, index_min | (type<<28));
+ tempreg = pci_read_config32(dev, 0x114);
+ for(i=dest_min; i<=3; i++) {
+ tempreg &= ~(0xff<<(i*8));
+ tempreg |= (cfg_map_dest<<(i*8));
+ }
+		pci_write_config32(dev, 0x110, index_min | (type<<28)); // re-select the index before writing (may be redundant)
+ pci_write_config32(dev, 0x114, tempreg);
+
+ pci_write_config32(dev, 0x110, index_max | (type<<28));
+ tempreg = pci_read_config32(dev, 0x114);
+ for(i=0; i<=dest_max; i++) {
+ tempreg &= ~(0xff<<(i*8));
+ tempreg |= (cfg_map_dest<<(i*8));
+ }
+		pci_write_config32(dev, 0x110, index_max | (type<<28)); // re-select the index before writing (may be redundant)
+ pci_write_config32(dev, 0x114, tempreg);
+ if((index_max-index_min)>1) {
+ tempreg = 0;
+ for(i=0; i<=3; i++) {
+ tempreg &= ~(0xff<<(i*8));
+ tempreg |= (cfg_map_dest<<(i*8));
+ }
+ for(i=index_min+1; i<index_max;i++) {
+ pci_write_config32(dev, 0x110, i | (type<<28));
+ pci_write_config32(dev, 0x114, tempreg);
+ }
+ }
+ }
+}
+#endif
+
+static void set_config_map_reg(u32 nodeid, u32 linkn, u32 ht_c_index,
+ u32 busn_min, u32 busn_max, u32 segbit,
+ u32 nodes)
+{
+ u32 tempreg;
+ u32 i;
+ device_t dev;
+
+ busn_min>>=segbit;
+ busn_max>>=segbit;
+
+#if EXT_CONF_SUPPORT
+ if(ht_c_index < 4) {
+#endif
+ tempreg = 3 | ((nodeid&0xf)<<4) | ((nodeid & 0x30)<<(12-4))|(linkn<<8)|((busn_min & 0xff)<<16)|((busn_max&0xff)<<24);
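+	// Field layout built above: bits[1:0] = 3 (enable), [7:4] = nodeid[3:0],
+	// [13:12] = nodeid[5:4], [10:8] = linkn, [23:16] = busn_min,
+	// [31:24] = busn_max. Illustrative value: nodeid=0, linkn=2,
+	// busn_min=0x20, busn_max=0x3f gives 0x3f200203.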
+ for(i=0; i<nodes; i++) {
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xe0 + ht_c_index * 4, tempreg);
+ }
+#if EXT_CONF_SUPPORT
+
+ return;
+ }
+
+	// if ht_c_index > 3, we must use the extended space (regs 0x110/0x114, type 6)
+ u32 cfg_map_dest;
+ u32 j;
+
+ // for nodeid at first
+ cfg_map_dest = (1<<7) | (1<<6) | (linkn<<0);
+
+ set_addr_map_reg_4_6_in_one_node(nodeid, cfg_map_dest, busn_min, busn_max, 6);
+
+ // all other nodes
+ cfg_map_dest = (1<<7) | (0<<6) | (nodeid<<0);
+ for(j = 0; j< nodes; j++) {
+ if(j== nodeid) continue;
+ set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, busn_min, busn_max, 6);
+ }
+#endif
+}
+
+static void clear_config_map_reg(u32 nodeid, u32 linkn, u32 ht_c_index,
+ u32 busn_min, u32 busn_max, u32 nodes)
+{
+ u32 i;
+ device_t dev;
+
+#if EXT_CONF_SUPPORT
+ if(ht_c_index<4) {
+#endif
+ for(i=0; i<nodes; i++) {
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xe0 + ht_c_index * 4, 0);
+ }
+#if EXT_CONF_SUPPORT
+ return;
+ }
+
+	// if ht_c_index > 3, use busn_min and busn_max to clear the extended space
+ u32 cfg_map_dest;
+ u32 j;
+
+
+ // all nodes
+ cfg_map_dest = 0;
+ for(j = 0; j< nodes; j++) {
+ set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, busn_min, busn_max, 6);
+ }
+#endif
+
+}
+
+#if PCI_BUS_SEGN_BITS
+static u32 check_segn(device_t dev, u32 segbusn, u32 nodes,
+ sys_info_conf_t *sysinfo)
+{
+	// check segbusn here; every node needs to use the same segn
+ if((segbusn & 0xff)>(0xe0-1)) {// use next segn
+ u32 segn = (segbusn >> 8) & 0x0f;
+ segn++;
+ segbusn = segn<<8;
+ }
+ if(segbusn>>8) {
+ u32 val;
+ val = pci_read_config32(dev, 0x160);
+ val &= ~(0xf<<25);
+ val |= (segbusn & 0xf00)<<(25-8);
+ pci_write_config32(dev, 0x160, val);
+ }
+
+ return segbusn;
+}
+#endif
+
+#if defined(__ROMCC__)
+static void set_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index,
+ u32 io_min, u32 io_max, u32 nodes)
+{
+ u32 i;
+ u32 tempreg;
+ device_t dev;
+
+#if EXT_CONF_SUPPORT
+ if(ht_c_index<4) {
+#endif
+ /* io range allocation */
+ tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
+ for(i=0; i<nodes; i++) {
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xC4 + ht_c_index * 8, tempreg);
+ }
+ tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
+ for(i=0; i<nodes; i++){
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xC0 + ht_c_index * 8, tempreg);
+ }
+#if EXT_CONF_SUPPORT
+ return;
+ }
+
+ u32 cfg_map_dest;
+ u32 j;
+
+	// if ht_c_index > 3, we must use the extended space
+
+ if(io_min>io_max) return;
+
+ // for nodeid at first
+ cfg_map_dest = (1<<7) | (1<<6) | (linkn<<0);
+
+ set_addr_map_reg_4_6_in_one_node(nodeid, cfg_map_dest, io_min, io_max, 4);
+
+ // all other nodes
+ cfg_map_dest = (1<<7) | (0<<6) | (nodeid<<0);
+ for(j = 0; j< nodes; j++) {
+ if(j== nodeid) continue;
+ set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, io_min, io_max, 4);
+ }
+#endif
+}
+
+
+static void clear_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index,
+ u32 io_min, u32 io_max, u32 nodes)
+{
+ u32 i;
+ device_t dev;
+#if EXT_CONF_SUPPORT
+ if(ht_c_index<4) {
+#endif
+ /* io range allocation */
+ for(i=0; i<nodes; i++) {
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xC4 + ht_c_index * 8, 0);
+ pci_write_config32(dev, 0xC0 + ht_c_index * 8, 0);
+ }
+#if EXT_CONF_SUPPORT
+ return;
+ }
+	// if ht_c_index > 3, use io_min and io_max to clear the extended space
+ u32 cfg_map_dest;
+ u32 j;
+
+
+ // all nodes
+ cfg_map_dest = 0;
+ for(j = 0; j< nodes; j++) {
+ set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, io_min, io_max, 4);
+ }
+#endif
+}
+#endif
+
+
+static void re_set_all_config_map_reg(u32 nodes, u32 segbit,
+ sys_info_conf_t *sysinfo)
+{
+ u32 ht_c_index;
+ device_t dev;
+
+ set_config_map_reg(0, sysinfo->sblk, 0, 0, sysinfo->ht_c_conf_bus[0]>>20, segbit, nodes);
+
+ /* clean others */
+ for(ht_c_index=1;ht_c_index<4; ht_c_index++) {
+ u32 i;
+ for(i=0; i<nodes; i++) {
+ #if defined(__ROMCC__)
+ dev = NODE_PCI(i, 1);
+ #else
+ dev = __f1_dev[i];
+ #endif
+ pci_write_config32(dev, 0xe0 + ht_c_index * 4, 0);
+ }
+ }
+#if EXT_CONF_SUPPORT
+ u32 j;
+ // clear the extend space
+ for(j = 0; j< nodes; j++) {
+ set_addr_map_reg_4_6_in_one_node(j,0, 0, 0xff, 6);
+ }
+#endif
+
+ for(ht_c_index = 1; ht_c_index<sysinfo->ht_c_num; ht_c_index++) {
+ u32 nodeid, linkn;
+ u32 busn_max;
+ u32 busn_min;
+ nodeid = (sysinfo->ht_c_conf_bus[ht_c_index] >> 2) & 0x3f;
+ linkn = (sysinfo->ht_c_conf_bus[ht_c_index]>>8) & 0x7;
+ busn_max = sysinfo->ht_c_conf_bus[ht_c_index]>>20;
+ busn_min = (sysinfo->ht_c_conf_bus[ht_c_index]>>12) & 0xff;
+ busn_min |= busn_max & 0xf00;
+ set_config_map_reg(nodeid, linkn, ht_c_index, busn_min, busn_max, segbit, nodes);
+ }
+
+}
+
+
+static u32 get_ht_c_index(u32 nodeid, u32 linkn, sys_info_conf_t *sysinfo)
+{
+ u32 tempreg;
+ u32 ht_c_index = 0;
+
+#if 0
+ tempreg = 3 | ((nodeid & 0xf) <<4) | ((nodeid & 0x30)<<(12-4)) | (linkn<<8);
+
+ for(ht_c_index=0;ht_c_index<4; ht_c_index++) {
+ reg = pci_read_config32(PCI_DEV(CBB, CDB, 1), 0xe0 + ht_c_index * 4);
+ if(((reg & 0xffff) == 0x0000)) { /*found free*/
+ break;
+ }
+ }
+#endif
+ tempreg = 3 | ((nodeid & 0x3f)<<2) | (linkn<<8);
+ for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
+ if(((sysinfo->ht_c_conf_bus[ht_c_index] & 0xfff) == tempreg)){
+ return ht_c_index;
+ }
+ }
+
+ for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
+ if((sysinfo->ht_c_conf_bus[ht_c_index] == 0)){
+ return ht_c_index;
+ }
+ }
+
+ return -1;
+
+}
+
+static void store_ht_c_conf_bus(u32 nodeid, u32 linkn, u32 ht_c_index,
+ u32 busn_min, u32 busn_max,
+ sys_info_conf_t *sysinfo)
+{
+ u32 val;
+ val = 3 | ((nodeid & 0x3f)<<2) | (linkn<<8);
+	sysinfo->ht_c_conf_bus[ht_c_index] = val | ((busn_min & 0xff) <<12) | (busn_max<<20); // buses on the same node must use the same segn
+
+}
+
+
+static void set_BusSegmentEn(u32 node, u32 segbit)
+{
+#if PCI_BUS_SEGN_BITS
+ u32 dword;
+ device_t dev;
+
+#if defined(__ROMCC__)
+ dev = NODE_PCI(node, 0);
+#else
+ dev = __f0_dev[node];
+#endif
+
+ dword = pci_read_config32(dev, 0x68);
+ dword &= ~(7<<28);
+ dword |= (segbit<<28); /* bus segment enable */
+ pci_write_config32(dev, 0x68, dword);
+#endif
+}
+
+#if !defined(__ROMCC__)
+static u32 get_io_addr_index(u32 nodeid, u32 linkn)
+{
+ u32 index;
+
+ for(index=0; index<256; index++) {
+ if((sysconf.conf_io_addrx[index+4] == 0)){
+ sysconf.conf_io_addr[index+4] = (nodeid & 0x3f) ;
+ sysconf.conf_io_addrx[index+4] = 1 | ((linkn & 0x7)<<4);
+ return index;
+ }
+ }
+
+ return 0;
+
+}
+
+static u32 get_mmio_addr_index(u32 nodeid, u32 linkn)
+{
+ u32 index;
+
+
+ for(index=0; index<64; index++) {
+ if((sysconf.conf_mmio_addrx[index+8] == 0)){
+ sysconf.conf_mmio_addr[index+8] = (nodeid & 0x3f) ;
+ sysconf.conf_mmio_addrx[index+8] = 1 | ((linkn & 0x7)<<4);
+ return index;
+ }
+ }
+
+ return 0;
+
+}
+
+static void store_conf_io_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
+ u32 io_min, u32 io_max)
+{
+ u32 val;
+#if EXT_CONF_SUPPORT
+ if(reg!=0x110) {
+#endif
+ /* io range allocation */
+ index = (reg-0xc0)>>3;
+#if EXT_CONF_SUPPORT
+ } else {
+ index+=4;
+ }
+#endif
+
+ val = (nodeid & 0x3f); // 6 bits used
+ sysconf.conf_io_addr[index] = val | ((io_max<<8) & 0xfffff000); //limit : with nodeid
+ val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
+ sysconf.conf_io_addrx[index] = val | ((io_min<<8) & 0xfffff000); // base : with enable bit
+
+ if( sysconf.io_addr_num<(index+1))
+ sysconf.io_addr_num = index+1;
+}
+
+
+static void store_conf_mmio_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
+ u32 mmio_min, u32 mmio_max)
+{
+ u32 val;
+#if EXT_CONF_SUPPORT
+ if(reg!=0x110) {
+#endif
+	/* mmio range allocation */
+ index = (reg-0x80)>>3;
+#if EXT_CONF_SUPPORT
+ } else {
+ index+=8;
+ }
+#endif
+
+ val = (nodeid & 0x3f) ; // 6 bits used
+ sysconf.conf_mmio_addr[index] = val | (mmio_max & 0xffffff00); //limit : with nodeid and linkn
+ val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
+ sysconf.conf_mmio_addrx[index] = val | (mmio_min & 0xffffff00); // base : with enable bit
+
+ if( sysconf.mmio_addr_num<(index+1))
+ sysconf.mmio_addr_num = index+1;
+}
+
+
+static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
+ u32 io_min, u32 io_max)
+{
+
+ u32 i;
+ u32 tempreg;
+#if EXT_CONF_SUPPORT
+ if(reg!=0x110) {
+#endif
+ /* io range allocation */
+ tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
+ for(i=0; i<sysconf.nodes; i++)
+ pci_write_config32(__f1_dev[i], reg+4, tempreg);
+
+ tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
+#if 0
+ // FIXME: can we use VGA reg instead?
+ if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
+ printk_spew("%s, enabling legacy VGA IO forwarding for %s link %s\n",
+ __func__, dev_path(dev), link);
+ tempreg |= PCI_IO_BASE_VGA_EN;
+ }
+ if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
+ tempreg |= PCI_IO_BASE_NO_ISA;
+ }
+#endif
+ for(i=0; i<sysconf.nodes; i++)
+ pci_write_config32(__f1_dev[i], reg, tempreg);
+#if EXT_CONF_SUPPORT
+ return;
+ }
+
+ u32 cfg_map_dest;
+ u32 j;
+	// if ht_c_index > 3, we must use the extended space
+ if(io_min>io_max) return;
+ // for nodeid at first
+ cfg_map_dest = (1<<7) | (1<<6) | (linkn<<0);
+
+ set_addr_map_reg_4_6_in_one_node(nodeid, cfg_map_dest, io_min, io_max, 4);
+
+ // all other nodes
+ cfg_map_dest = (1<<7) | (0<<6) | (nodeid<<0);
+ for(j = 0; j< sysconf.nodes; j++) {
+ if(j== nodeid) continue;
+ set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, io_min, io_max, 4);
+ }
+#endif
+
+}
+static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
+{
+
+ u32 i;
+ u32 tempreg;
+#if EXT_CONF_SUPPORT
+ if(reg!=0x110) {
+#endif
+	/* mmio range allocation */
+ tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
+ for(i=0; i<nodes; i++)
+ pci_write_config32(__f1_dev[i], reg+4, tempreg);
+ tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
+ for(i=0; i<sysconf.nodes; i++)
+ pci_write_config32(__f1_dev[i], reg, tempreg);
+#if EXT_CONF_SUPPORT
+ return;
+ }
+
+ device_t dev;
+ u32 j;
+	// if ht_c_index > 3, we must use the extended space
+ // for nodeid at first
+ u32 enable;
+
+ if(mmio_min>mmio_max) {
+ return;
+ }
+
+ enable = 1;
+
+ dev = __f1_dev[nodeid];
+ tempreg = ((mmio_min>>3) & 0x1fffff00)| (1<<6) | (linkn<<0);
+ pci_write_config32(dev, 0x110, index | (2<<28));
+ pci_write_config32(dev, 0x114, tempreg);
+
+ tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
+ pci_write_config32(dev, 0x110, index | (3<<28));
+ pci_write_config32(dev, 0x114, tempreg);
+
+
+ // all other nodes
+ tempreg = ((mmio_min>>3) & 0x1fffff00) | (0<<6) | (nodeid<<0);
+ for(j = 0; j< sysconf.nodes; j++) {
+ if(j== nodeid) continue;
+ dev = __f1_dev[j];
+ pci_write_config32(dev, 0x110, index | (2<<28));
+ pci_write_config32(dev, 0x114, tempreg);
+ }
+
+ tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
+ for(j = 0; j< sysconf.nodes; j++) {
+ if(j==nodeid) continue;
+ dev = __f1_dev[j];
+ pci_write_config32(dev, 0x110, index | (3<<28));
+ pci_write_config32(dev, 0x114, tempreg);
+ }
+#endif
+}
+
+#endif
diff --git a/src/northbridge/amd/amdfam10/amdfam10_nums.h b/src/northbridge/amd/amdfam10/amdfam10_nums.h
new file mode 100644
index 0000000000..016921ac68
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/amdfam10_nums.h
@@ -0,0 +1,41 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AMDFAM10_NUMS_H
+
+#define AMDFAM10_NUMS_H
+
+#if CONFIG_MAX_PHYSICAL_CPUS > 8
+ #if CONFIG_MAX_PHYSICAL_CPUS > 32
+ #define NODE_NUMS 64
+ #else
+ #define NODE_NUMS 32
+ #endif
+#else
+ #define NODE_NUMS 8
+#endif
+
+// max number of HCs installed at the same time; it could be bigger than (48+24) if we have 3x4x4
+#define HC_NUMS 32
+
+// it could be even bigger
+#define HC_POSSIBLE_NUM 32
+
+#endif
+
diff --git a/src/northbridge/amd/amdfam10/amdfam10_pci.c b/src/northbridge/amd/amdfam10/amdfam10_pci.c
new file mode 100644
index 0000000000..0c947268fb
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/amdfam10_pci.c
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef AMDFAM10_PCI_C
+#define AMDFAM10_PCI_C
+/* bits [10:8] are the dev func, bits [1:0] are the dev index */
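+
+/*
+ * These helpers implement an index/data register-pair access pattern:
+ * write the index to index_reg, then read or write the data word at
+ * index_reg + 4. In the *_wait variants, bit 30 of the index selects a
+ * write request and bit 31 is polled as the "access done" flag.
+ */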
+
+
+static u32 pci_read_config32_index(device_t dev, u32 index_reg, u32 index)
+{
+ u32 dword;
+
+ pci_write_config32(dev, index_reg, index);
+ dword = pci_read_config32(dev, index_reg+0x4);
+ return dword;
+}
+
+static void pci_write_config32_index(device_t dev, u32 index_reg, u32 index, u32 data)
+{
+
+ pci_write_config32(dev, index_reg, index);
+
+ pci_write_config32(dev, index_reg + 0x4, data);
+
+}
+
+static u32 pci_read_config32_index_wait(device_t dev, u32 index_reg, u32 index)
+{
+
+ u32 dword;
+
+ index &= ~(1<<30);
+ pci_write_config32(dev, index_reg, index);
+ do {
+ dword = pci_read_config32(dev, index_reg);
+ } while (!(dword & (1<<31)));
+ dword = pci_read_config32(dev, index_reg+0x4);
+ return dword;
+}
+
+static void pci_write_config32_index_wait(device_t dev, u32 index_reg, u32 index, u32 data)
+{
+
+ u32 dword;
+
+ pci_write_config32(dev, index_reg + 0x4, data);
+ index |= (1<<30);
+ pci_write_config32(dev, index_reg, index);
+ do {
+ dword = pci_read_config32(dev, index_reg);
+ } while (!(dword & (1<<31)));
+
+}
+#endif
+
+
diff --git a/src/northbridge/amd/amdfam10/chip.h b/src/northbridge/amd/amdfam10/chip.h
new file mode 100644
index 0000000000..b11c2a4f62
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/chip.h
@@ -0,0 +1,24 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+struct northbridge_amd_amdfam10_config
+{
+};
+
+extern struct chip_operations northbridge_amd_amdfam10_ops;
diff --git a/src/northbridge/amd/amdfam10/debug.c b/src/northbridge/amd/amdfam10/debug.c
new file mode 100644
index 0000000000..5240adfb8f
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/debug.c
@@ -0,0 +1,331 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Generic FAM10 debug code, used by mainboard specific car_auto.c
+ */
+
+#include "amdfam10_pci.c"
+
+static void udelay_tsc(u32 us);
+
+static void print_debug_addr(const char *str, void *val)
+{
+#if CACHE_AS_RAM_ADDRESS_DEBUG == 1
+ printk_debug("------Address debug: %s%x------\n", str, val);
+#endif
+}
+
+static void print_debug_pci_dev(u32 dev)
+{
+#if PCI_BUS_SEGN_BITS==0
+ printk_debug("PCI: %02x:%02x.%02x", (dev>>20) & 0xff, (dev>>15) & 0x1f, (dev>>12) & 0x7);
+#else
+ printk_debug("PCI: %04x:%02x:%02x.%02x", (dev>>28) & 0x0f, (dev>>20) & 0xff, (dev>>15) & 0x1f, (dev>>12) & 0x7);
+#endif
+}
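+
+/*
+ * The packed dev value decodes as bus at [27:20], device at [19:15] and
+ * function at [14:12] (plus segment at [31:28] when PCI_BUS_SEGN_BITS is
+ * set); e.g. 0x000c3000 would print as "PCI: 00:18.03" -- an illustrative
+ * value only.
+ */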
+
+static void print_pci_devices(void)
+{
+ device_t dev;
+ for(dev = PCI_DEV(0, 0, 0);
+ dev <= PCI_DEV(0xff, 0x1f, 0x7);
+ dev += PCI_DEV(0,0,1)) {
+ u32 id;
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0x0000)) {
+ continue;
+ }
+ print_debug_pci_dev(dev);
+ printk_debug(" %04x:%04x\n", (id & 0xffff), (id>>16));
+ if(((dev>>12) & 0x07) == 0) {
+ u8 hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
+ }
+}
+
+static void print_pci_devices_on_bus(u32 busn)
+{
+ device_t dev;
+ for(dev = PCI_DEV(busn, 0, 0);
+ dev <= PCI_DEV(busn, 0x1f, 0x7);
+ dev += PCI_DEV(0,0,1)) {
+ u32 id;
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0x0000)) {
+ continue;
+ }
+ print_debug_pci_dev(dev);
+ printk_debug(" %04x:%04x\n", (id & 0xffff), (id>>16));
+ if(((dev>>12) & 0x07) == 0) {
+ u8 hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
+ }
+}
+
+
+
+static void dump_pci_device_range(u32 dev, u32 start_reg, u32 size)
+{
+ int i;
+ print_debug_pci_dev(dev);
+ int j;
+ int end = start_reg + size;
+
+ for(i = start_reg; i < end; i+=4) {
+ u32 val;
+ if ((i & 0x0f) == 0) {
+ printk_debug("\n%04x:",i);
+ }
+ val = pci_read_config32(dev, i);
+ for(j=0;j<4;j++) {
+ printk_debug(" %02x", val & 0xff);
+ val >>= 8;
+ }
+ }
+ print_debug("\n");
+}
+static void dump_pci_device(u32 dev)
+{
+ dump_pci_device_range(dev, 0, 4096);
+}
+static u32 pci_read_config32_index_wait(device_t dev, u32 index_reg, u32 index);
+static void dump_pci_device_index_wait_range(u32 dev, u32 index_reg, u32 start,
+ u32 size)
+{
+ int i;
+ int end = start + size;
+ print_debug_pci_dev(dev);
+ print_debug(" -- index_reg="); print_debug_hex32(index_reg);
+
+ for(i = start; i < end; i++) {
+ u32 val;
+ int j;
+ printk_debug("\n%02x:",i);
+ val = pci_read_config32_index_wait(dev, index_reg, i);
+ for(j=0;j<4;j++) {
+ printk_debug(" %02x", val & 0xff);
+ val >>= 8;
+ }
+
+ }
+ print_debug("\n");
+}
+static void dump_pci_device_index_wait(u32 dev, u32 index_reg)
+{
+ dump_pci_device_index_wait_range(dev, index_reg, 0, 0x54);
+	dump_pci_device_index_wait_range(dev, index_reg, 0x100, 0x08); //DIMM1 when memclk > 400MHz
+// dump_pci_device_index_wait_range(dev, index_reg, 0x200, 0x08); //DIMM2
+// dump_pci_device_index_wait_range(dev, index_reg, 0x300, 0x08); //DIMM3
+
+}
+
+static void dump_pci_device_index(u32 dev, u32 index_reg, u32 type, u32 length)
+{
+ int i;
+ print_debug_pci_dev(dev);
+
+ print_debug(" index reg: "); print_debug_hex16(index_reg); print_debug(" type: "); print_debug_hex8(type);
+
+ type<<=28;
+
+ for(i = 0; i < length; i++) {
+ u32 val;
+ if ((i & 0x0f) == 0) {
+ printk_debug("\n%02x:",i);
+ }
+ val = pci_read_config32_index(dev, index_reg, i|type);
+ printk_debug(" %08x", val);
+ }
+ print_debug("\n");
+}
+
+
+static void dump_pci_devices(void)
+{
+ device_t dev;
+ for(dev = PCI_DEV(0, 0, 0);
+ dev <= PCI_DEV(0xff, 0x1f, 0x7);
+ dev += PCI_DEV(0,0,1)) {
+ u32 id;
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0x0000)) {
+ continue;
+ }
+ dump_pci_device(dev);
+
+ if(((dev>>12) & 0x07) == 0) {
+ u8 hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
+ }
+}
+
+
+static void dump_pci_devices_on_bus(u32 busn)
+{
+ device_t dev;
+ for(dev = PCI_DEV(busn, 0, 0);
+ dev <= PCI_DEV(busn, 0x1f, 0x7);
+ dev += PCI_DEV(0,0,1)) {
+ u32 id;
+ id = pci_read_config32(dev, PCI_VENDOR_ID);
+ if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0x0000)) {
+ continue;
+ }
+ dump_pci_device(dev);
+
+ if(((dev>>12) & 0x07) == 0) {
+ u8 hdr_type;
+ hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
+ if((hdr_type & 0x80) != 0x80) {
+ dev += PCI_DEV(0,0,7);
+ }
+ }
+ }
+}
+
+#ifndef DEBUG_SMBUS
+#define DEBUG_SMBUS 0
+#endif
+
+#if DEBUG_SMBUS == 1
+
+static void dump_spd_registers(const struct mem_controller *ctrl)
+{
+ int i;
+ print_debug("\n");
+ for(i = 0; i < DIMM_SOCKETS; i++) {
+ u32 device;
+ device = ctrl->spd_addr[i];
+ if (device) {
+ int j;
+ printk_debug("dimm: %02x.0: %02x", i, device);
+ for(j = 0; j < 128; j++) {
+ int status;
+ u8 byte;
+ if ((j & 0xf) == 0) {
+ printk_debug("\n%02x: ", j);
+ }
+ status = smbus_read_byte(device, j);
+ if (status < 0) {
+ break;
+ }
+ byte = status & 0xff;
+ printk_debug("%02x ", byte);
+ }
+ print_debug("\n");
+ }
+ device = ctrl->spd_addr[i+DIMM_SOCKETS];
+ if (device) {
+ int j;
+ printk_debug("dimm: %02x.1: %02x", i, device);
+ for(j = 0; j < 128; j++) {
+ int status;
+ u8 byte;
+ if ((j & 0xf) == 0) {
+ printk_debug("\n%02x: ", j);
+ }
+ status = smbus_read_byte(device, j);
+ if (status < 0) {
+ break;
+ }
+ byte = status & 0xff;
+ printk_debug("%02x ", byte);
+ }
+ print_debug("\n");
+ }
+ }
+}
+static void dump_smbus_registers(void)
+{
+ u32 device;
+ print_debug("\n");
+ for(device = 1; device < 0x80; device++) {
+ int j;
+ if( smbus_read_byte(device, 0) < 0 ) continue;
+ printk_debug("smbus: %02x", device);
+ for(j = 0; j < 256; j++) {
+ int status;
+ u8 byte;
+ status = smbus_read_byte(device, j);
+ if (status < 0) {
+ break;
+ }
+ if ((j & 0xf) == 0) {
+ printk_debug("\n%02x: ",j);
+ }
+ byte = status & 0xff;
+ printk_debug("%02x ", byte);
+ }
+ print_debug("\n");
+ }
+}
+#endif
+static void dump_io_resources(u32 port)
+{
+
+ int i;
+ udelay_tsc(2000);
+ printk_debug("%04x:\n", port);
+ for(i=0;i<256;i++) {
+ u8 val;
+ if ((i & 0x0f) == 0) {
+ printk_debug("%02x:", i);
+ }
+ val = inb(port);
+ printk_debug(" %02x",val);
+ if ((i & 0x0f) == 0x0f) {
+ print_debug("\n");
+ }
+ port++;
+ }
+}
+
+static void dump_mem(u32 start, u32 end)
+{
+ u32 i;
+ print_debug("dump_mem:");
+ for(i=start;i<end;i++) {
+ if((i & 0xf)==0) {
+ printk_debug("\n%08x:", i);
+ }
+ printk_debug(" %02x", (u8)*((u8 *)i));
+ }
+ print_debug("\n");
+}
diff --git a/src/northbridge/amd/amdfam10/early_ht.c b/src/northbridge/amd/amdfam10/early_ht.c
new file mode 100644
index 0000000000..0ddbf9993a
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/early_ht.c
@@ -0,0 +1,175 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// For SB HT chain only
+// mmconf is not ready yet
+static void set_bsp_node_CHtExtNodeCfgEn(void)
+{
+#if EXT_RT_TBL_SUPPORT == 1
+ u32 dword;
+ dword = pci_io_read_config32(PCI_DEV(0, 0x18, 0), 0x68);
+ dword |= (1<<27) | (1<<25);
+ /* CHtExtNodeCfgEn: coherent link extended node configuration enable,
+	   Nodes[31:0] appear at 0xff:[31:0], nodes[63:32] at 0xfe:[31:0]
+	   ---- only 32 nodes for now.
+	   It can be used even with fewer than 8 nodes;
+	   in that case we have room for 8 more devices on bus 0.
+ */
+
+ /* CHtExtAddrEn */
+ pci_io_write_config32(PCI_DEV(0, 0x18, 0), 0x68, dword);
+	// CPUs are on bus 0xff and 0xfe now. From now on we can use CBB and CDB.
+#endif
+}
+
+static void enumerate_ht_chain(void)
+{
+#if HT_CHAIN_UNITID_BASE != 0
+/* HT_CHAIN_UNITID_BASE could be 0 (only one HT device in the HT chain);
+   if so, we don't need to walk the chain */
+
+	/* Assumption: the HT chain that is bus 0 has the HT I/O hub on it.
+	 * On most boards this just happens. If a CPU has multiple
+	 * non-coherent links, the appropriate bus registers for those
+	 * links need to be programmed to point at bus 0.
+ */
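+	/* Illustrative walk, assuming HT_CHAIN_UNITID_BASE == 1: the first
+	 * slave found at 0:0.0 is reprogrammed to unit ID 1; if it consumes
+	 * e.g. 3 unit IDs (flags[9:5]), the next device gets unit ID 4, and
+	 * so on, until the end-of-chain bit (slave control bit 6) is seen or
+	 * no further device answers at 0:0.0.
+	 */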
+ unsigned next_unitid, last_unitid = 0;
+#if HT_CHAIN_END_UNITID_BASE != 0x20
+	// let's record the last HT device, so we can set its
+	// unit ID to HT_CHAIN_END_UNITID_BASE
+ unsigned real_last_unitid = 0;
+ u8 real_last_pos = 0;
+ int ht_dev_num = 0; // except host_bridge
+ u8 end_used = 0;
+#endif
+
+ next_unitid = HT_CHAIN_UNITID_BASE;
+ do {
+ u32 id;
+ u8 hdr_type, pos;
+ last_unitid = next_unitid;
+
+ id = pci_io_read_config32(PCI_DEV(0,0,0), PCI_VENDOR_ID);
+ /* If the chain is enumerated quit */
+ if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0xffff) ||
+ (((id >> 16) & 0xffff) == 0x0000))
+ {
+ break;
+ }
+
+ hdr_type = pci_io_read_config8(PCI_DEV(0,0,0), PCI_HEADER_TYPE);
+ pos = 0;
+ hdr_type &= 0x7f;
+
+ if ((hdr_type == PCI_HEADER_TYPE_NORMAL) ||
+ (hdr_type == PCI_HEADER_TYPE_BRIDGE))
+ {
+ pos = pci_io_read_config8(PCI_DEV(0,0,0), PCI_CAPABILITY_LIST);
+ }
+ while(pos != 0) {
+ u8 cap;
+ cap = pci_io_read_config8(PCI_DEV(0,0,0), pos + PCI_CAP_LIST_ID);
+ if (cap == PCI_CAP_ID_HT) {
+ u16 flags;
+ /* Read and write and reread flags so the link
+ * direction bit is valid.
+ */
+ flags = pci_io_read_config16(PCI_DEV(0,0,0), pos + PCI_CAP_FLAGS);
+ pci_io_write_config16(PCI_DEV(0,0,0), pos + PCI_CAP_FLAGS, flags);
+ flags = pci_io_read_config16(PCI_DEV(0,0,0), pos + PCI_CAP_FLAGS);
+ if ((flags >> 13) == 0) {
+ unsigned count;
+ unsigned ctrl, ctrl_off;
+ device_t devx;
+
+#if HT_CHAIN_END_UNITID_BASE != 0x20
+ if(next_unitid>=0x18) {
+ if(!end_used) {
+ next_unitid = HT_CHAIN_END_UNITID_BASE;
+ end_used = 1;
+ } else {
+ goto out;
+ }
+ }
+ real_last_unitid = next_unitid;
+ real_last_pos = pos;
+ ht_dev_num++ ;
+#endif
+ #if HT_CHAIN_END_UNITID_BASE == 0
+ if (!next_unitid)
+ goto out;
+ #endif
+ flags &= ~0x1f;
+ flags |= next_unitid & 0x1f;
+ count = (flags >> 5) & 0x1f;
+ devx = PCI_DEV(0, next_unitid, 0);
+ next_unitid += count;
+
+ pci_io_write_config16(PCI_DEV(0, 0, 0), pos + PCI_CAP_FLAGS, flags);
+
+ /* Test for end of chain */
+ ctrl_off = ((flags >> 10) & 1)?
+ PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
+
+ do {
+ ctrl = pci_read_config16(devx, pos + ctrl_off);
+ /* Is this the end of the hypertransport chain? */
+ if (ctrl & (1 << 6)) {
+ goto out;
+ }
+
+ if (ctrl & ((1 << 4) | (1 << 8))) {
+ /*
+ * Either the link has failed, or we have
+ * a CRC error.
+ * Sometimes this can happen due to link
+							 * retrain, so let's knock it down and see
+							 * if it's transient
+ */
+ ctrl |= ((1 << 4) | (1 <<8)); // Link fail + Crc
+ pci_write_config16(devx, pos + ctrl_off, ctrl);
+ ctrl = pci_read_config16(devx, pos + ctrl_off);
+ if (ctrl & ((1 << 4) | (1 << 8))) {
+ // can not clear the error
+ break;
+ }
+ }
+ } while((ctrl & (1 << 5)) == 0);
+
+ break;
+ }
+ }
+ pos = pci_io_read_config8(PCI_DEV(0, 0, 0), pos + PCI_CAP_LIST_NEXT);
+ }
+ } while(last_unitid != next_unitid);
+
+out: ;
+#if HT_CHAIN_END_UNITID_BASE != 0x20
+ if((ht_dev_num>1) && (real_last_unitid != HT_CHAIN_END_UNITID_BASE) && !end_used) {
+ u16 flags;
+ flags = pci_io_read_config16(PCI_DEV(0,real_last_unitid,0), real_last_pos + PCI_CAP_FLAGS);
+ flags &= ~0x1f;
+ flags |= HT_CHAIN_END_UNITID_BASE & 0x1f;
+ pci_io_write_config16(PCI_DEV(0, real_last_unitid, 0), real_last_pos + PCI_CAP_FLAGS, flags);
+ }
+#endif
+
+#endif
+}
diff --git a/src/northbridge/amd/amdfam10/get_pci1234.c b/src/northbridge/amd/amdfam10/get_pci1234.c
new file mode 100644
index 0000000000..0ef4b73bed
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/get_pci1234.c
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <console/console.h>
+#include <device/pci.h>
+#include <device/pci_ids.h>
+#include <string.h>
+#include <stdint.h>
+
+#include <cpu/amd/amdfam10_sysconf.h>
+
+
+/* We need the pci1234 array:
+ * pci1234[0] records the sblink and bus range,
+ * pci1234[i] records HT chain i.
+ * The ordering is preserved even when some HT I/O card is not installed.
+ *
+ * 1n: 8
+ * 2n: 7x2
+ * 3n: 6x3
+ * 4n: 5x4
+ * 5n: 4x5
+ * 6n: 3x6
+ * 7n: 2x7
+ * 8n: 1x8
+ *
+ * 8n(4x2): 8x4
+ * 16n(4x4): 16*2
+ * 20n(4x5): 20x1
+ * 32n(4x4+4x4): 16x1
+ *
+ * Total: we just use 32 here; if you have more, you may need to
+ * adjust HC_POSSIBLE_NUM and update ssdt.dsl (hcdn, hclk).
+ *
+ * Put all the possible HT node/link pairs into the pci1234[] list in
+ * get_bus_conf.c in the mainboard directory. Also, don't forget to
+ * increase ACPI_SSDTX_NUM etc. if you have too many SSDTs. A co-processor
+ * on socket 1 of a 2-way system, or on sockets 2 and 3 of a 4-way system,
+ * should be treated as one HC too.
+ *
+ */
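+
+/*
+ * Each enabled pci1234[] entry reuses the ht_c_conf_bus layout built by
+ * store_ht_c_conf_bus() in amdfam10_conf.c: bit 0 = enable (bit 1 is
+ * masked off here), bits [7:2] = node id, bits [10:8] = link,
+ * bits [19:12] = bus min, bits [31:20] = bus max (including segment bits).
+ */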
+
+
+void get_pci1234(void)
+{
+
+ int i,j;
+ u32 dword;
+
+ dword = sysconf.sblk<<8;
+ dword |= 1;
+ sysconf.pci1234[0] = dword; // sblink
+ sysconf.hcid[0] = 0;
+
+	/* About hardcoded numbering for HT_IO support:
+	 * put the node_id/link_id pairs that could have an HT chain into one array,
+	 * then check whether each is enabled, then update the final value.
+	 */
+
+	// here we need to set hcdn
+	// 1. hypertransport.c needs to record hcdn_reg together with 0xe0, 0xe4, 0xe8, 0xec when they are set
+	// 2. so at the same time we need to update hcdn with hcdn_reg here
+// printk_debug("sysconf.ht_c_num = %02d\n", sysconf.ht_c_num);
+
+ for(j=0;j<sysconf.ht_c_num;j++) {
+ u32 dwordx;
+ dwordx = sysconf.ht_c_conf_bus[j];
+// printk_debug("sysconf.ht_c_conf_bus[%02d] = %08x\n", j, sysconf.ht_c_conf_bus[j]);
+ dwordx &=0xfffffffd; //keep bus num, node_id, link_num, enable bits
+ if((dwordx & 0x7fd) == dword) { //SBLINK
+ sysconf.pci1234[0] = dwordx;
+ sysconf.hcdn[0] = sysconf.hcdn_reg[j];
+ continue;
+ }
+ if((dwordx & 1)) {
+ // We need to find out the number of HC
+ // for exact match
+ for(i=1;i<sysconf.hc_possible_num;i++) {
+ if((dwordx & 0x7fc) == (sysconf.pci1234[i] & 0x7fc)) { // same node and same linkn
+ sysconf.pci1234[i] = dwordx;
+ sysconf.hcdn[i] = sysconf.hcdn_reg[j];
+ break;
+ }
+ }
+ // for 0xffc match or same node
+ for(i=1;i<sysconf.hc_possible_num;i++) {
+ if((dwordx & 0x7fc) == (dwordx & sysconf.pci1234[i] & 0x7fc)) {
+ sysconf.pci1234[i] = dwordx;
+ sysconf.hcdn[i] = sysconf.hcdn_reg[j];
+ break;
+ }
+ }
+ }
+ }
+
+ for(i=1;i<sysconf.hc_possible_num;i++) {
+ if(!(sysconf.pci1234[i] & 1)) {
+ sysconf.pci1234[i] = 0;
+ sysconf.hcdn[i] = 0x20202020;
+ }
+ sysconf.hcid[i] = 0;
+ }
+}
diff --git a/src/northbridge/amd/amdfam10/misc_control.c b/src/northbridge/amd/amdfam10/misc_control.c
new file mode 100644
index 0000000000..8e8f39ab8d
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/misc_control.c
@@ -0,0 +1,155 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2003 by Eric Biederman
+ * Copyright (C) Stefan Reinauer
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Turn off machine check triggers when reading
+ * pci space where there are no devices.
+ * This is necessary when scanning the bus for
+ * devices, which is done by the kernel.
+ */
+
+#include <console/console.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <device/pci_ids.h>
+#include <device/pci_ops.h>
+#include <part/hard_reset.h>
+#include <pc80/mc146818rtc.h>
+#include <bitops.h>
+#include <cpu/amd/model_10xxx_rev.h>
+
+#include "amdfam10.h"
+
+/**
+ * @brief Read resources for AGP aperture
+ *
+ * @param
+ *
+ * There is only one AGP aperture resource needed. The resource is added to
+ * the northbridge of the BSP.
+ *
+ * The same trick can be used to augment legacy VGA resources, which can
+ * be detected by the generic PCI resource allocator for VGA devices.
+ * BAD: it is trickier than it looks; the resource allocation code is
+ * implemented in a way that deliberately does NOT do legacy VGA resource
+ * allocation :-(.
+ */
+static void mcf3_read_resources(device_t dev)
+{
+ struct resource *resource;
+ unsigned char iommu;
+ /* Read the generic PCI resources */
+ pci_dev_read_resources(dev);
+
+	/* If we are not the first processor, don't allocate the GART aperture */
+ if (dev->path.u.pci.devfn != PCI_DEVFN(CDB, 3)) {
+ return;
+ }
+
+ iommu = 1;
+ get_option(&iommu, "iommu");
+
+ if (iommu) {
+		/* Add a GART aperture resource */
+ resource = new_resource(dev, 0x94);
+ resource->size = iommu?AGP_APERTURE_SIZE:1;
+ resource->align = log2(resource->size);
+ resource->gran = log2(resource->size);
+ resource->limit = 0xffffffff; /* 4G */
+ resource->flags = IORESOURCE_MEM;
+ }
+}
+
+static void set_agp_aperture(device_t dev)
+{
+ struct resource *resource;
+
+ resource = probe_resource(dev, 0x94);
+ if (resource) {
+ device_t pdev;
+ u32 gart_base, gart_acr;
+
+ /* Remember this resource has been stored */
+ resource->flags |= IORESOURCE_STORED;
+
+ /* Find the size of the GART aperture */
+ gart_acr = (0<<6)|(0<<5)|(0<<4)|((resource->gran - 25) << 1)|(0<<0);
+
+ /* Get the base address */
+ gart_base = ((resource->base) >> 25) & 0x00007fff;
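+		/* e.g. a 64MB aperture (gran == 26) at base 0xC0000000 would give
+		 * gart_acr == 0x2 and gart_base == 0x60 -- illustrative values only.
+		 */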
+
+		/* Update the other northbridges */
+ pdev = 0;
+ while((pdev = dev_find_device(PCI_VENDOR_ID_AMD, 0x1203, pdev))) {
+ /* Store the GART size but don't enable it */
+ pci_write_config32(pdev, 0x90, gart_acr);
+
+ /* Store the GART base address */
+ pci_write_config32(pdev, 0x94, gart_base);
+
+ /* Don't set the GART Table base address */
+ pci_write_config32(pdev, 0x98, 0);
+
+ /* Report the resource has been stored... */
+ report_resource_stored(pdev, resource, " <gart>");
+ }
+ }
+}
+
+static void mcf3_set_resources(device_t dev)
+{
+	/* Set the GART aperture */
+ set_agp_aperture(dev);
+
+ /* Set the generic PCI resources */
+ pci_dev_set_resources(dev);
+}
+
+static void misc_control_init(struct device *dev)
+{
+ u32 cmd;
+
+ printk_debug("NB: Function 3 Misc Control.. ");
+
+ /* Disable Machine checks from Invalid Locations.
+ * This is needed for PC backwards compatibility.
+ */
+ cmd = pci_read_config32(dev, 0x44);
+ cmd |= (1<<6) | (1<<25);
+ pci_write_config32(dev, 0x44, cmd );
+
+ printk_debug("done.\n");
+}
+
+
+static struct device_operations mcf3_ops = {
+ .read_resources = mcf3_read_resources,
+ .set_resources = mcf3_set_resources,
+ .enable_resources = pci_dev_enable_resources,
+ .init = misc_control_init,
+ .scan_bus = 0,
+ .ops_pci = 0,
+};
+
+static struct pci_driver mcf3_driver __pci_driver = {
+ .ops = &mcf3_ops,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = 0x1203,
+};
diff --git a/src/northbridge/amd/amdfam10/northbridge.c b/src/northbridge/amd/amdfam10/northbridge.c
new file mode 100644
index 0000000000..79e52b82b8
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/northbridge.c
@@ -0,0 +1,1477 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <console/console.h>
+#include <arch/io.h>
+#include <stdint.h>
+#include <device/device.h>
+#include <device/pci.h>
+#include <device/pci_ids.h>
+#include <device/hypertransport.h>
+#include <stdlib.h>
+#include <string.h>
+#include <bitops.h>
+#include <cpu/cpu.h>
+
+#include <cpu/x86/lapic.h>
+
+#if CONFIG_LOGICAL_CPUS==1
+#include <cpu/amd/quadcore.h>
+#include <pc80/mc146818rtc.h>
+#endif
+
+#include "chip.h"
+#include "root_complex/chip.h"
+#include "northbridge.h"
+
+#include "amdfam10.h"
+
+#if HW_MEM_HOLE_SIZEK != 0
+#include <cpu/amd/model_10xxx_rev.h>
+#endif
+
+#include <cpu/amd/amdfam10_sysconf.h>
+
+struct amdfam10_sysconf_t sysconf;
+
+#define FX_DEVS NODE_NUMS
+static device_t __f0_dev[FX_DEVS];
+static device_t __f1_dev[FX_DEVS];
+static device_t __f2_dev[FX_DEVS];
+static device_t __f4_dev[FX_DEVS];
+
+device_t get_node_pci(u32 nodeid, u32 fn)
+{
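+	/* e.g. if CBB is 0xff and CDB is 0x18 (cf. the bus 0xff/0xfe remap in
+	 * early_ht.c), node 2 function 3 is bus 0xff device 0x1a.3, and nodes
+	 * 32..63 move to bus CBB-1 (0xfe) -- an illustrative mapping.
+	 */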
+#if NODE_NUMS == 64
+ if(nodeid<32) {
+ return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
+ } else {
+ return dev_find_slot(CBB-1, PCI_DEVFN(CDB + nodeid - 32, fn));
+ }
+
+#else
+ return dev_find_slot(CBB, PCI_DEVFN(CDB + nodeid, fn));
+#endif
+
+}
+static void get_fx_devs(void)
+{
+ int i;
+ if (__f1_dev[0]) {
+ return;
+ }
+ for(i = 0; i < FX_DEVS; i++) {
+ __f0_dev[i] = get_node_pci(i, 0);
+ __f1_dev[i] = get_node_pci(i, 1);
+ __f2_dev[i] = get_node_pci(i, 2);
+ __f4_dev[i] = get_node_pci(i, 4);
+ }
+ if (!__f1_dev[0]) {
+		printk_err("Cannot find %02x:%02x.1\n", CBB, CDB);
+ die("Cannot go on\n");
+ }
+}
+
+static u32 f1_read_config32(u32 reg)
+{
+ get_fx_devs();
+ return pci_read_config32(__f1_dev[0], reg);
+}
+
+static void f1_write_config32(u32 reg, u32 value)
+{
+ int i;
+ get_fx_devs();
+ for(i = 0; i < FX_DEVS; i++) {
+ device_t dev;
+ dev = __f1_dev[i];
+ if (dev && dev->enabled) {
+ pci_write_config32(dev, reg, value);
+ }
+ }
+}
+
+
+static u32 amdfam10_nodeid(device_t dev)
+{
+#if NODE_NUMS == 64
+ unsigned busn;
+ busn = dev->bus->secondary;
+ if(busn != CBB) {
+ return (dev->path.u.pci.devfn >> 3) - CDB + 32;
+ } else {
+ return (dev->path.u.pci.devfn >> 3) - CDB;
+ }
+
+#else
+ return (dev->path.u.pci.devfn >> 3) - CDB;
+#endif
+}
+
+#include "amdfam10_conf.c"
+
+static void set_vga_enable_reg(u32 nodeid, u32 linkn)
+{
+ u32 val;
+
+ val = 1 | (nodeid<<4) | (linkn<<12);
+	/* it will route (1) mmio 0xa0000:0xbffff and (2) io 0x3b0:0x3bb,
+ 0x3c0:0x3df */
+ f1_write_config32(0xf4, val);
+
+}
+
+static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, u32 link, u32 sblink,
+ u32 max, u32 offset_unitid)
+{
+// We want to put the SB chain on bus 0; can we?
+
+
+ u32 link_type;
+ int i;
+ u32 ht_c_index;
+	u32 ht_unitid_base[4]; // assume at most 4 HT devices on the chain here
+ u32 max_bus;
+ u32 min_bus;
+ u32 is_sublink1 = (link>3);
+ device_t devx;
+ u32 busses;
+ u32 segn = max>>8;
+ u32 busn = max&0xff;
+ u32 max_devfn;
+
+#if HT3_SUPPORT==1
+ if(is_sublink1) {
+ u32 regpos;
+ u32 reg;
+ regpos = 0x170 + 4 * (link&3); // it is only on sublink0
+ reg = pci_read_config32(dev, regpos);
+		if(reg & 1) return max; // already ganged, no sublink1
+ devx = get_node_pci(nodeid, 4);
+ } else
+#endif
+ devx = dev;
+
+
+ dev->link[link].cap = 0x80 + ((link&3) *0x20);
+ do {
+ link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
+ } while(link_type & ConnectionPending);
+ if (!(link_type & LinkConnected)) {
+ return max;
+ }
+ do {
+ link_type = pci_read_config32(devx, dev->link[link].cap + 0x18);
+ } while(!(link_type & InitComplete));
+ if (!(link_type & NonCoherent)) {
+ return max;
+ }
+ /* See if there is an available configuration space mapping
+ * register in function 1.
+ */
+ ht_c_index = get_ht_c_index(nodeid, link, &sysconf);
+
+#if EXT_CONF_SUPPORT == 0
+ if(ht_c_index>=4) return max;
+#endif
+
+ /* Set up the primary, secondary and subordinate bus numbers.
+ * We have no idea how many busses are behind this bridge yet,
+ * so we set the subordinate bus number to 0xff for the moment.
+ */
+
+#if SB_HT_CHAIN_ON_BUS0 > 0
+	// first chain will be on bus 0
+ if((nodeid == 0) && (sblink==link)) { // actually max is 0 here
+ min_bus = max;
+ }
+ #if SB_HT_CHAIN_ON_BUS0 > 1
+	// second chain will be on 0x40, third on 0x80, fourth on 0xc0
+	// i.e. refine that to chains 2, 3, 4 ==> 0x40, 0x80, 0xc0
+	// more than 4 chains will use more segments; we can have 16 segments with 256 buses each, but that case needs the kernel to support MMIO PCI config.
+ else {
+		min_bus = ((busn>>3) + 1) << 3; // one node can have 8 links and segn stays the same
+ }
+ max = min_bus | (segn<<8);
+ #else
+ //other ...
+ else {
+ min_bus = ++max;
+ }
+ #endif
+#else
+ min_bus = ++max;
+#endif
+ max_bus = 0xfc | (segn<<8);
+
+ dev->link[link].secondary = min_bus;
+ dev->link[link].subordinate = max_bus;
+ /* Read the existing primary/secondary/subordinate bus
+ * number configuration.
+ */
+ busses = pci_read_config32(devx, dev->link[link].cap + 0x14);
+
+ /* Configure the bus numbers for this bridge: the configuration
+	 * transactions will not be propagated by the bridge if it is
+ * not correctly configured
+ */
+ busses &= 0xffff00ff;
+ busses |= ((u32)(dev->link[link].secondary) << 8);
+ pci_write_config32(devx, dev->link[link].cap + 0x14, busses);
+
+
+ /* set the config map space */
+
+ set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
+
+	/* Now we can scan all of the subordinate busses, i.e. the
+	 * chain on the HyperTransport link
+ */
+ for(i=0;i<4;i++) {
+ ht_unitid_base[i] = 0x20;
+ }
+
+	// if ext conf is enabled, we only need to use up to 0x1f
+ if (min_bus == 0)
+ max_devfn = (0x17<<3) | 7;
+ else
+ max_devfn = (0x1f<<3) | 7;
+
+ max = hypertransport_scan_chain(&dev->link[link], 0, max_devfn, max, ht_unitid_base, offset_unitid);
+
+
+ /* We know the number of busses behind this bridge. Set the
+	 * subordinate bus number to its real value
+ */
+ if(ht_c_index>3) { // clear the extend reg
+ clear_config_map_reg(nodeid, link, ht_c_index, (max+1)>>sysconf.segbit, (dev->link[link].subordinate)>>sysconf.segbit, sysconf.nodes);
+ }
+
+ dev->link[link].subordinate = max;
+ set_config_map_reg(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, sysconf.segbit, sysconf.nodes);
+ sysconf.ht_c_num++;
+
+ {
+		// pack the ht_unitid_base values into hcdn_reg for this chain
+ u32 temp = 0;
+ for(i=0;i<4;i++) {
+ temp |= (ht_unitid_base[i] & 0xff) << (i*8);
+ }
+
+ sysconf.hcdn_reg[ht_c_index] = temp;
+
+ }
+
+ store_ht_c_conf_bus(nodeid, link, ht_c_index, dev->link[link].secondary, dev->link[link].subordinate, &sysconf);
+
+
+ return max;
+}
+
+static u32 amdfam10_scan_chains(device_t dev, u32 max)
+{
+ u32 nodeid;
+ u32 link;
+ u32 sblink = sysconf.sblk;
+ u32 offset_unitid = 0;
+
+ nodeid = amdfam10_nodeid(dev);
+
+
+// Put sb chain in bus 0
+#if SB_HT_CHAIN_ON_BUS0 > 0
+ if(nodeid==0) {
+ #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
+ offset_unitid = 1;
+ #endif
+		max = amdfam10_scan_chain(dev, nodeid, sblink, sblink, max, offset_unitid ); // do the SB HT chain first, in case a board (e.g. the s2885) puts the SB chain (8131/8111) on link2 but the 8151 on link0
+ }
+#endif
+
+
+#if PCI_BUS_SEGN_BITS
+ max = check_segn(dev, max, sysconf.nodes, &sysconf);
+#endif
+
+
+ for(link = 0; link < dev->links; link++) {
+#if SB_HT_CHAIN_ON_BUS0 > 0
+ if( (nodeid == 0) && (sblink == link) ) continue; //already done
+#endif
+ offset_unitid = 0;
+ #if ((HT_CHAIN_UNITID_BASE != 1) || (HT_CHAIN_END_UNITID_BASE != 0x20))
+ #if SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
+ if((nodeid == 0) && (sblink == link))
+ #endif
+ offset_unitid = 1;
+ #endif
+
+ max = amdfam10_scan_chain(dev, nodeid, link, sblink, max, offset_unitid);
+ }
+ return max;
+}
+
+
+static int reg_useable(u32 reg,device_t goal_dev, u32 goal_nodeid,
+ u32 goal_link)
+{
+ struct resource *res;
+ u32 nodeid, link;
+ int result;
+ res = 0;
+ for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
+ device_t dev;
+ dev = __f0_dev[nodeid];
+ for(link = 0; !res && (link < 8); link++) {
+			res = probe_resource(dev, 0x1000 + reg + (link<<16)); // 8 links; the 0x1000 offset marks f1 resources
+ }
+ }
+ result = 2;
+ if (res) {
+ result = 0;
+ if ( (goal_link == (link - 1)) &&
+ (goal_nodeid == (nodeid - 1)) &&
+ (res->flags <= 1)) {
+ result = 1;
+ }
+ }
+ return result;
+}
+
+static struct resource *amdfam10_find_iopair(device_t dev, u32 nodeid, u32 link)
+{
+ struct resource *resource;
+ u32 free_reg, reg;
+ resource = 0;
+ free_reg = 0;
+ for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
+ int result;
+ result = reg_useable(reg, dev, nodeid, link);
+ if (result == 1) {
+ /* I have been allocated this one */
+ break;
+ }
+ else if (result > 1) {
+ /* I have a free register pair */
+ free_reg = reg;
+ }
+ }
+ if (reg > 0xd8) {
+		reg = free_reg; // if none was free, free_reg is still 0
+ }
+
+ //Ext conf space
+ if(!reg) {
+		// because of the extended conf space we will never run out of regs, but we need an index to tell them apart, so the same node and link can have multiple ranges
+ u32 index = get_io_addr_index(nodeid, link);
+		reg = 0x110+ (index<<24) + (4<<20); // index can be 0..255
+ }
+
+ resource = new_resource(dev, 0x1000 + reg + (link<<16));
+
+ return resource;
+}
+
+static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
+{
+ struct resource *resource;
+ u32 free_reg, reg;
+ resource = 0;
+ free_reg = 0;
+ for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
+ int result;
+ result = reg_useable(reg, dev, nodeid, link);
+ if (result == 1) {
+ /* I have been allocated this one */
+ break;
+ }
+ else if (result > 1) {
+ /* I have a free register pair */
+ free_reg = reg;
+ }
+ }
+ if (reg > 0xb8) {
+ reg = free_reg;
+ }
+
+ //Ext conf space
+ if(!reg) {
+		/* With extended config space we never run out of registers,
+		 * but we need an index to tell the ranges apart, so the same
+		 * node/link pair can carry multiple ranges. */
+		u32 index = get_mmio_addr_index(nodeid, link);
+		reg = 0x110+ (index<<24) + (6<<20); // index can be 0..63
+
+ }
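+	/* Same scheme as the I/O pairs: the index (0..63) lands in bits
+	 * [31:24] of reg, and (6<<20) here versus (4<<20) for I/O keeps the
+	 * two kinds of extended entries distinct. */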
+ resource = new_resource(dev, 0x1000 + reg + (link<<16));
+ return resource;
+}
+
+
+static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
+{
+ struct resource *resource;
+
+ /* Initialize the io space constraints on the current bus */
+ resource = amdfam10_find_iopair(dev, nodeid, link);
+ if (resource) {
+ u32 align;
+#if EXT_CONF_SUPPORT == 1
+ if((resource->index & 0x1fff) == 0x1110) { // ext
+ align = 8;
+ }
+ else
+#endif
+ align = log2(HT_IO_HOST_ALIGN);
+ resource->base = 0;
+ resource->size = 0;
+ resource->align = align;
+ resource->gran = align;
+ resource->limit = 0xffffUL;
+ resource->flags = IORESOURCE_IO;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_IO, IORESOURCE_IO);
+ }
+
+ /* Initialize the prefetchable memory constraints on the current bus */
+ resource = amdfam10_find_mempair(dev, nodeid, link);
+ if (resource) {
+ resource->base = 0;
+ resource->size = 0;
+ resource->align = log2(HT_MEM_HOST_ALIGN);
+ resource->gran = log2(HT_MEM_HOST_ALIGN);
+ resource->limit = 0xffffffffffULL;
+ resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH);
+
+#if EXT_CONF_SUPPORT == 1
+ if((resource->index & 0x1fff) == 0x1110) { // ext
+ normalize_resource(resource);
+ }
+#endif
+
+ }
+
+ /* Initialize the memory constraints on the current bus */
+ resource = amdfam10_find_mempair(dev, nodeid, link);
+ if (resource) {
+ resource->base = 0;
+ resource->size = 0;
+ resource->align = log2(HT_MEM_HOST_ALIGN);
+ resource->gran = log2(HT_MEM_HOST_ALIGN);
+ resource->limit = 0xffffffffffULL;
+ resource->flags = IORESOURCE_MEM;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
+ IORESOURCE_MEM);
+
+#if EXT_CONF_SUPPORT == 1
+ if((resource->index & 0x1fff) == 0x1110) { // ext
+ normalize_resource(resource);
+ }
+#endif
+
+ }
+}
+
+
+static void amdfam10_read_resources(device_t dev)
+{
+ u32 nodeid, link;
+
+ nodeid = amdfam10_nodeid(dev);
+ for(link = 0; link < dev->links; link++) {
+ if (dev->link[link].children) {
+ amdfam10_link_read_bases(dev, nodeid, link);
+ }
+ }
+}
+
+
+static void amdfam10_set_resource(device_t dev, struct resource *resource,
+ u32 nodeid)
+{
+ resource_t rbase, rend;
+ unsigned reg, link;
+ char buf[50];
+
+ /* Make certain the resource has actually been set */
+ if (!(resource->flags & IORESOURCE_ASSIGNED)) {
+ return;
+ }
+
+ /* If I have already stored this resource don't worry about it */
+ if (resource->flags & IORESOURCE_STORED) {
+ return;
+ }
+
+ /* Only handle PCI memory and IO resources */
+ if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+ return;
+
+ /* Ensure I am actually looking at a resource of function 1 */
+ if ((resource->index & 0xffff) < 0x1000) {
+ return;
+ }
+ /* Get the base address */
+ rbase = resource->base;
+
+ /* Get the limit (rounded up) */
+ rend = resource_end(resource);
+
+ /* Get the register and link */
+ reg = resource->index & 0xfff; // 4k
+ link = ( resource->index>> 16)& 0x7; // 8 links
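+	/* resource->index was built as 0x1000 + reg + (link<<16) by
+	 * amdfam10_find_iopair()/amdfam10_find_mempair(), e.g. reg 0xc8 on
+	 * link 2 gives index 0x210c8, which decodes back to reg = 0xc8 and
+	 * link = 2 here. */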
+
+ if (resource->flags & IORESOURCE_IO) {
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_IO, IORESOURCE_IO);
+
+ set_io_addr_reg(dev, nodeid, link, reg, rbase>>8, rend>>8);
+ store_conf_io_addr(nodeid, link, reg, (resource->index >> 24), rbase>>8, rend>>8);
+ }
+ else if (resource->flags & IORESOURCE_MEM) {
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
+ resource->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH));
+ set_mmio_addr_reg(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
+ store_conf_mmio_addr(nodeid, link, reg, (resource->index >>24), rbase>>8, rend>>8);
+ }
+ resource->flags |= IORESOURCE_STORED;
+ sprintf(buf, " <node %02x link %02x>",
+ nodeid, link);
+ report_resource_stored(dev, resource, buf);
+}
+
+/**
+ *
+ * I tried to reuse the resource allocation code in amdfam10_set_resource()
+ * but it is too difficult to deal with the resource allocation magic.
+ */
+#if CONFIG_CONSOLE_VGA_MULTI == 1
+extern device_t vga_pri; // the primary vga device, defined in device.c
+#endif
+
+static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
+{
+ unsigned link;
+
+	/* Find out which link the VGA card is connected to;
+	 * we only deal with the 'first' VGA card. */
+ for (link = 0; link < dev->links; link++) {
+ if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
+#if CONFIG_CONSOLE_VGA_MULTI == 1
+ printk_debug("VGA: vga_pri bus num = %d dev->link[link] bus range [%d,%d]\n", vga_pri->bus->secondary,
+ dev->link[link].secondary,dev->link[link].subordinate);
+ /* We need to make sure the vga_pri is under the link */
+ if((vga_pri->bus->secondary >= dev->link[link].secondary ) &&
+ (vga_pri->bus->secondary <= dev->link[link].subordinate )
+ )
+#endif
+ break;
+ }
+ }
+
+ /* no VGA card installed */
+ if (link == dev->links)
+ return;
+
+ printk_debug("VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link);
+ set_vga_enable_reg(nodeid, link);
+}
+
+static void amdfam10_set_resources(device_t dev)
+{
+ u32 nodeid, link;
+ int i;
+
+ /* Find the nodeid */
+ nodeid = amdfam10_nodeid(dev);
+
+ amdfam10_create_vga_resource(dev, nodeid);
+
+ /* Set each resource we have found */
+ for(i = 0; i < dev->resources; i++) {
+ amdfam10_set_resource(dev, &dev->resource[i], nodeid);
+ }
+
+ for(link = 0; link < dev->links; link++) {
+ struct bus *bus;
+ bus = &dev->link[link];
+ if (bus->children) {
+ assign_resources(bus);
+ }
+ }
+}
+
+
+static void amdfam10_enable_resources(device_t dev)
+{
+ pci_dev_enable_resources(dev);
+ enable_childrens_resources(dev);
+}
+
+static void mcf0_control_init(struct device *dev)
+{
+}
+
+static struct device_operations northbridge_operations = {
+ .read_resources = amdfam10_read_resources,
+ .set_resources = amdfam10_set_resources,
+ .enable_resources = amdfam10_enable_resources,
+ .init = mcf0_control_init,
+ .scan_bus = amdfam10_scan_chains,
+ .enable = 0,
+ .ops_pci = 0,
+};
+
+
+static struct pci_driver mcf0_driver __pci_driver = {
+ .ops = &northbridge_operations,
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = 0x1200,
+};
+
+#if CONFIG_CHIP_NAME == 1
+
+struct chip_operations northbridge_amd_amdfam10_ops = {
+ CHIP_NAME("AMD FAM10 Northbridge")
+ .enable_dev = 0,
+};
+
+#endif
+
+static void pci_domain_read_resources(device_t dev)
+{
+ struct resource *resource;
+ unsigned reg;
+ unsigned link;
+
+ /* Find the already assigned resource pairs */
+ get_fx_devs();
+ for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
+ u32 base, limit;
+ base = f1_read_config32(reg);
+ limit = f1_read_config32(reg + 0x04);
+ /* Is this register allocated? */
+ if ((base & 3) != 0) {
+ unsigned nodeid, link;
+ device_t dev;
+ if(reg<0xc0) { // mmio
+ nodeid = (limit & 0xf) + (base&0x30);
+ } else { // io
+ nodeid = (limit & 0xf) + ((base>>4)&0x30);
+ }
+ link = (limit >> 4) & 7;
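+			/* For example, with F1xB8/F1xBC programmed to base
+			 * 0x00fc0003 and limit 0x00ffff00 (as in
+			 * resourcemap.c), (base & 3) != 0 marks the pair as
+			 * live and it decodes to nodeid 0, link 0, reserved
+			 * below under index 0x1000 + 0xb8. */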
+ dev = __f0_dev[nodeid];
+ if (dev) {
+ /* Reserve the resource */
+ struct resource *resource;
+ resource = new_resource(dev, 0x1000 + reg + (link<<16));
+ if (resource) {
+ resource->flags = 1;
+ }
+ }
+ }
+ }
+	/* FIXME: do we need to check extended config space as well?
+	   It is unlikely that many values are preset there. */
+
+#if CONFIG_PCI_64BIT_PREF_MEM == 0
+ /* Initialize the system wide io space constraints */
+ resource = new_resource(dev, IOINDEX_SUBTRACTIVE(0, 0));
+ resource->base = 0x400;
+ resource->limit = 0xffffUL;
+ resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
+
+ /* Initialize the system wide memory resources constraints */
+ resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
+ resource->limit = 0xfcffffffffULL;
+ resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
+#else
+ for(link=0; link<dev->links; link++) {
+ /* Initialize the system wide io space constraints */
+ resource = new_resource(dev, 0|(link<<2));
+ resource->base = 0x400;
+ resource->limit = 0xffffUL;
+ resource->flags = IORESOURCE_IO;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_IO, IORESOURCE_IO);
+
+ /* Initialize the system wide prefetchable memory resources constraints */
+ resource = new_resource(dev, 1|(link<<2));
+ resource->limit = 0xfcffffffffULL;
+ resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH);
+
+ /* Initialize the system wide memory resources constraints */
+ resource = new_resource(dev, 2|(link<<2));
+ resource->limit = 0xfcffffffffULL;
+ resource->flags = IORESOURCE_MEM;
+ compute_allocate_resource(&dev->link[link], resource,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
+ IORESOURCE_MEM);
+ }
+#endif
+}
+
+static void ram_resource(device_t dev, unsigned long index,
+ resource_t basek, resource_t sizek)
+{
+ struct resource *resource;
+
+ if (!sizek) {
+ return;
+ }
+ resource = new_resource(dev, index);
+ resource->base = basek << 10;
+ resource->size = sizek << 10;
+ resource->flags = IORESOURCE_MEM | IORESOURCE_CACHEABLE | \
+ IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
+}
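+/* ram_resource() takes kilobyte units: e.g. basek = 0, sizek = 0x100000
+ * (1GB) registers a cacheable RAM resource covering 0x00000000-0x3fffffff. */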
+
+static void tolm_test(void *gp, struct device *dev, struct resource *new)
+{
+ struct resource **best_p = gp;
+ struct resource *best;
+ best = *best_p;
+ if (!best || (best->base > new->base)) {
+ best = new;
+ }
+ *best_p = best;
+}
+
+static u32 find_pci_tolm(struct bus *bus, u32 tolm)
+{
+ struct resource *min;
+ min = 0;
+ search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
+ if (min && tolm > min->base) {
+ tolm = min->base;
+ }
+ return tolm;
+}
+
+#if CONFIG_PCI_64BIT_PREF_MEM == 1
+#define BRIDGE_IO_MASK (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH)
+#endif
+
+#if HW_MEM_HOLE_SIZEK != 0
+
+struct hw_mem_hole_info {
+ unsigned hole_startk;
+ int node_id;
+};
+
+static struct hw_mem_hole_info get_hw_mem_hole_info(void)
+{
+ struct hw_mem_hole_info mem_hole;
+ int i;
+
+ mem_hole.hole_startk = HW_MEM_HOLE_SIZEK;
+ mem_hole.node_id = -1;
+
+ for (i = 0; i < sysconf.nodes; i++) {
+ struct dram_base_mask_t d;
+ u32 hole;
+ d = get_dram_base_mask(i);
+ if(!(d.mask & 1)) continue; // no memory on this node
+
+ hole = pci_read_config32(__f1_dev[i], 0xf0);
+		if(hole & 1) { // we found the hole
+			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
+			mem_hole.node_id = i; // record the node number with the hole
+			break; // there is only one hole
+ }
+ }
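+	/* F1xF0 is the DRAM Hole Address register: bit 0 is the hole-valid
+	 * flag and bits [31:24] hold DramHoleBase[31:24]. A value such as
+	 * 0xe0000001 yields hole_startk = 0xe0000000 >> 10 = 0x380000, i.e.
+	 * a hole starting at 3.5GB. */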
+
+	/* Double check the case where the base/limit registers were set up
+	 * non-contiguously instead of via the hole register; if so, derive
+	 * hole_startk from the gap. */
+ if(mem_hole.node_id==-1) {
+ resource_t limitk_pri = 0;
+ for(i=0; i<sysconf.nodes; i++) {
+ struct dram_base_mask_t d;
+ resource_t base_k, limit_k;
+ d = get_dram_base_mask(i);
+ if(!(d.base & 1)) continue;
+
+ base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
+			if(base_k > 4 *1024 * 1024) break; // no need to check above 4GB
+			if(limitk_pri != base_k) { // we found the hole
+				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4GB
+				mem_hole.node_id = i;
+				break; // there is only one hole
+ }
+
+ limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
+ limitk_pri = limit_k;
+ }
+ }
+ return mem_hole;
+}
+
+
+#if CONFIG_AMDMCT == 0
+static void disable_hoist_memory(unsigned long hole_startk, int i)
+{
+ int ii;
+ device_t dev;
+ struct dram_base_mask_t d;
+ u32 sel_m;
+ u32 sel_hi_en;
+ u32 hoist;
+ u32 hole_sizek;
+
+ u32 one_DCT;
+ struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
+ struct mem_info *meminfo;
+ meminfo = &sysinfox->meminfo[i];
+
+ one_DCT = get_one_DCT(meminfo);
+
+ // 1. find which node has hole
+ // 2. change limit in that node.
+ // 3. change base and limit in later node
+ // 4. clear that node f0
+
+	// if the memory hole is not enabled on a node, change its base instead
+
+ hole_sizek = (4*1024*1024) - hole_startk;
+
+ for(ii=NODE_NUMS-1;ii>i;ii--) {
+
+ d = get_dram_base_mask(ii);
+
+ if(!(d.mask & 1)) continue;
+
+ d.base -= (hole_sizek>>9);
+ d.mask -= (hole_sizek>>9);
+ set_dram_base_mask(ii, d, sysconf.nodes);
+
+ if(get_DctSelHiEn(ii) & 1) {
+ sel_m = get_DctSelBaseAddr(ii);
+ sel_m -= hole_startk>>10;
+ set_DctSelBaseAddr(ii, sel_m);
+ }
+ }
+
+ d = get_dram_base_mask(i);
+ dev = __f1_dev[i];
+ hoist = pci_read_config32(dev, 0xf0);
+ sel_hi_en = get_DctSelHiEn(i);
+
+ if(sel_hi_en & 1) {
+ sel_m = get_DctSelBaseAddr(i);
+ }
+
+ if(hoist & 1) {
+ pci_write_config32(dev, 0xf0, 0);
+ d.mask -= (hole_sizek>>9);
+ set_dram_base_mask(i, d, sysconf.nodes);
+ if(one_DCT || (sel_m >= (hole_startk>>10))) {
+ if(sel_hi_en & 1) {
+ sel_m -= hole_startk>>10;
+ set_DctSelBaseAddr(i, sel_m);
+ }
+ }
+ if(sel_hi_en & 1) {
+ set_DctSelBaseOffset(i, 0);
+ }
+ }
+ else {
+ d.base -= (hole_sizek>>9);
+ d.mask -= (hole_sizek>>9);
+ set_dram_base_mask(i, d, sysconf.nodes);
+
+ if(sel_hi_en & 1) {
+ sel_m -= hole_startk>>10;
+ set_DctSelBaseAddr(i, sel_m);
+ }
+ }
+
+}
+#endif
+
+#endif
+
+static void pci_domain_set_resources(device_t dev)
+{
+#if CONFIG_PCI_64BIT_PREF_MEM == 1
+ struct resource *io, *mem1, *mem2;
+ struct resource *resource, *last;
+#endif
+ unsigned long mmio_basek;
+ u32 pci_tolm;
+ int i, idx;
+ u32 link;
+#if HW_MEM_HOLE_SIZEK != 0
+ struct hw_mem_hole_info mem_hole;
+ u32 reset_memhole = 1;
+#endif
+
+#if CONFIG_PCI_64BIT_PREF_MEM == 1
+
+ for(link=0; link<dev->links; link++) {
+ /* Now reallocate the pci resources memory with the
+ * highest addresses I can manage.
+ */
+ mem1 = find_resource(dev, 1|(link<<2));
+ mem2 = find_resource(dev, 2|(link<<2));
+
+ printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
+ mem1->base, mem1->limit, mem1->size, mem1->align);
+ printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
+ mem2->base, mem2->limit, mem2->size, mem2->align);
+
+ /* See if both resources have roughly the same limits */
+ if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
+ ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
+ {
+ /* If so place the one with the most stringent alignment first
+ */
+ if (mem2->align > mem1->align) {
+ struct resource *tmp;
+ tmp = mem1;
+ mem1 = mem2;
+ mem2 = tmp;
+ }
+ /* Now place the memory as high up as it will go */
+ mem2->base = resource_max(mem2);
+ mem1->limit = mem2->base - 1;
+ mem1->base = resource_max(mem1);
+ }
+ else {
+ /* Place the resources as high up as they will go */
+ mem2->base = resource_max(mem2);
+ mem1->base = resource_max(mem1);
+ }
+
+ printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
+ mem1->base, mem1->limit, mem1->size, mem1->align);
+ printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
+ mem2->base, mem2->limit, mem2->size, mem2->align);
+ }
+
+ last = &dev->resource[dev->resources];
+ for(resource = &dev->resource[0]; resource < last; resource++)
+ {
+ resource->flags |= IORESOURCE_ASSIGNED;
+ resource->flags &= ~IORESOURCE_STORED;
+		link = (resource->index >> 2) & 3;
+ compute_allocate_resource(&dev->link[link], resource,
+ BRIDGE_IO_MASK, resource->flags & BRIDGE_IO_MASK);
+
+ resource->flags |= IORESOURCE_STORED;
+ report_resource_stored(dev, resource, "");
+
+ }
+#endif
+
+ pci_tolm = 0xffffffffUL;
+ for(link=0;link<dev->links; link++) {
+ pci_tolm = find_pci_tolm(&dev->link[link], pci_tolm);
+ }
+
+#warning "FIXME handle interleaved nodes"
+ mmio_basek = pci_tolm >> 10;
+ /* Round mmio_basek to something the processor can support */
+ mmio_basek &= ~((1 << 6) -1);
+
+#warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
+	/* Round the mmio hole to 64M */
+ mmio_basek &= ~((64*1024) - 1);
+
+#if HW_MEM_HOLE_SIZEK != 0
+/* If the HW memory hole was already set up in the raminit stage, compare
+ * mmio_basek and hole_startk here. If mmio_basek is bigger than hole_startk,
+ * use hole_startk as mmio_basek and the hole does not need to be reset;
+ * otherwise reset the hole to mmio_basek.
+ */
+
+ mem_hole = get_hw_mem_hole_info();
+
+	// Use hole_startk as mmio_basek; the hole does not need to be reset
+ if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
+ mmio_basek = mem_hole.hole_startk;
+ reset_memhole = 0;
+ }
+
+ #if CONFIG_AMDMCT == 0
+ //mmio_basek = 3*1024*1024; // for debug to meet boundary
+
+ if(reset_memhole) {
+ if(mem_hole.node_id!=-1) {
+			/* Raminit selects HW_MEM_HOLE_SIZEK, so it cannot force
+			 * hole_startk to an arbitrary basek. We need to reset our
+			 * memory hole because we want a bigger hole than the one
+			 * already set. Before that, disable the existing hole,
+			 * because it could already be set on node i+1 instead.
+			 */
+ disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
+ }
+
+ #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
+	// Double check that mmio_basek is valid for the hole setting:
+	// if it is equal to a node's basek, decrease it a bit
+	resource_t basek_pri = 0;
+ for (i = 0; i < sysconf.nodes; i++) {
+ struct dram_base_mask_t d;
+ resource_t basek;
+ d = get_dram_base_mask(i);
+
+ if(!(d.mask &1)) continue;
+
+ basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
+ if(mmio_basek == (u32)basek) {
+			mmio_basek -= (u32)(basek - basek_pri); // increase the hole size so it lands inside the previous node
+ break;
+ }
+ basek_pri = basek;
+ }
+ #endif
+ }
+ #endif
+
+
+#endif
+
+ idx = 0x10;
+ for(i = 0; i < sysconf.nodes; i++) {
+ struct dram_base_mask_t d;
+		resource_t basek, limitk, sizek; // resource_t: must cover >4GB, up to 1TB
+ d = get_dram_base_mask(i);
+
+ if(!(d.mask & 1)) continue;
+		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose 6 bits here
+ limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
+ sizek = limitk - basek;
+
+ /* see if we need a hole from 0xa0000 to 0xbffff */
+ if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
+ ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
+ idx += 0x10;
+ basek = (8*64)+(16*16);
+ sizek = limitk - ((8*64)+(16*16));
+
+ }
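+		/* (8*64 + 8*16)K = 640K = 0xa0000 and (8*64 + 16*16)K = 768K
+		 * = 0xc0000, so the carve-out above skips the legacy VGA
+		 * window at 0xa0000-0xbffff. */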
+
+// printk_debug("node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
+
+		/* split the region to accommodate PCI memory space */
+ if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
+ if (basek <= mmio_basek) {
+ unsigned pre_sizek;
+ pre_sizek = mmio_basek - basek;
+ if(pre_sizek>0) {
+ ram_resource(dev, (idx | i), basek, pre_sizek);
+ idx += 0x10;
+ sizek -= pre_sizek;
+ }
+ #if CONFIG_AMDMCT == 0
+ #if HW_MEM_HOLE_SIZEK != 0
+ if(reset_memhole) {
+ struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
+ struct mem_info *meminfo;
+ meminfo = &sysinfox->meminfo[i];
+ sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
+ }
+ #endif
+ #endif
+
+ basek = mmio_basek;
+ }
+ if ((basek + sizek) <= 4*1024*1024) {
+ sizek = 0;
+ }
+ else {
+ basek = 4*1024*1024;
+ sizek -= (4*1024*1024 - mmio_basek);
+ }
+ }
+ ram_resource(dev, (idx | i), basek, sizek);
+ idx += 0x10;
+ }
+
+ for(link = 0; link < dev->links; link++) {
+ struct bus *bus;
+ bus = &dev->link[link];
+ if (bus->children) {
+ assign_resources(bus);
+ }
+ }
+}
+
+static u32 pci_domain_scan_bus(device_t dev, u32 max)
+{
+ u32 reg;
+ int i;
+ /* Unmap all of the HT chains */
+ for(reg = 0xe0; reg <= 0xec; reg += 4) {
+ f1_write_config32(reg, 0);
+ }
+#if EXT_CONF_SUPPORT == 1
+ // all nodes
+ for(i = 0; i< sysconf.nodes; i++) {
+ int index;
+ for(index = 0; index < 64; index++) {
+ pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
+ pci_write_config32(__f1_dev[i], 0x114, 0);
+ }
+
+ }
+#endif
+
+
+ for(i=0;i<dev->links;i++) {
+ max = pci_scan_bus(&dev->link[i], PCI_DEVFN(CDB, 0), 0xff, max);
+ }
+
+ /* Tune the hypertransport transaction for best performance.
+ * Including enabling relaxed ordering if it is safe.
+ */
+ get_fx_devs();
+ for(i = 0; i < FX_DEVS; i++) {
+ device_t f0_dev;
+ f0_dev = __f0_dev[i];
+ if (f0_dev && f0_dev->enabled) {
+ u32 httc;
+ httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
+ httc &= ~HTTC_RSP_PASS_PW;
+ if (!dev->link[0].disable_relaxed_ordering) {
+ httc |= HTTC_RSP_PASS_PW;
+ }
+ printk_spew("%s passpw: %s\n",
+ dev_path(dev),
+ (!dev->link[0].disable_relaxed_ordering)?
+ "enabled":"disabled");
+ pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
+ }
+ }
+ return max;
+}
+
+static struct device_operations pci_domain_ops = {
+ .read_resources = pci_domain_read_resources,
+ .set_resources = pci_domain_set_resources,
+ .enable_resources = enable_childrens_resources,
+ .init = 0,
+ .scan_bus = pci_domain_scan_bus,
+#if MMCONF_SUPPORT
+ .ops_pci_bus = &pci_ops_mmconf,
+#else
+ .ops_pci_bus = &pci_cf8_conf1,
+#endif
+};
+
+static void sysconf_init(device_t dev) // first node
+{
+ sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
+ sysconf.segbit = 0;
+ sysconf.ht_c_num = 0;
+
+ unsigned ht_c_index;
+
+ for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
+ sysconf.ht_c_conf_bus[ht_c_index] = 0;
+ }
+
+ sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
+#if CONFIG_MAX_PHYSICAL_CPUS > 8
+ sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
+#endif
+
+ sysconf.enabled_apic_ext_id = 0;
+ sysconf.lift_bsp_apicid = 0;
+
+ /* Find the bootstrap processors apicid */
+ sysconf.bsp_apicid = lapicid();
+ sysconf.apicid_offset = sysconf.bsp_apicid;
+
+#if (ENABLE_APIC_EXT_ID == 1)
+ if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
+ {
+ sysconf.enabled_apic_ext_id = 1;
+ }
+ #if (APIC_ID_OFFSET>0)
+ if(sysconf.enabled_apic_ext_id) {
+ if(sysconf.bsp_apicid == 0) {
+ /* bsp apic id is not changed */
+ sysconf.apicid_offset = APIC_ID_OFFSET;
+ } else {
+ sysconf.lift_bsp_apicid = 1;
+ }
+
+ }
+ #endif
+#endif
+
+}
+
+static u32 cpu_bus_scan(device_t dev, u32 max)
+{
+ struct bus *cpu_bus;
+ device_t dev_mc;
+ device_t pci_domain;
+ int i,j;
+ int nodes;
+ unsigned nb_cfg_54;
+ unsigned siblings;
+ int cores_found;
+ int disable_siblings;
+ unsigned ApicIdCoreIdSize;
+
+ nb_cfg_54 = 0;
+ ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
+ if(ApicIdCoreIdSize) {
+ siblings = (1<<ApicIdCoreIdSize)-1;
+ } else {
+ siblings = 3; //quad core
+ }
+
+ disable_siblings = !CONFIG_LOGICAL_CPUS;
+#if CONFIG_LOGICAL_CPUS == 1
+ get_option(&disable_siblings, "quad_core");
+#endif
+
+	// On pre-E0 parts nb_cfg_54 cannot be set (even if set, it still
+	// reads back as 0).
+	// How can the BSP read nb_cfg_54 for every node, and how can it
+	// distinguish D0 from E0 single-core parts?
+
+ nb_cfg_54 = read_nb_cfg_54();
+
+#if CBB
+ dev_mc = dev_find_slot(0, PCI_DEVFN(CDB, 0)); //0x00
+ if(dev_mc && dev_mc->bus) {
+ printk_debug("%s found", dev_path(dev_mc));
+ pci_domain = dev_mc->bus->dev;
+ if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
+ printk_debug("\n%s move to ",dev_path(dev_mc));
+ dev_mc->bus->secondary = CBB; // move to 0xff
+ printk_debug("%s",dev_path(dev_mc));
+
+ } else {
+ printk_debug(" but it is not under pci_domain directly ");
+ }
+ printk_debug("\n");
+
+ }
+ dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
+ if(!dev_mc) {
+ dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
+ if (dev_mc && dev_mc->bus) {
+ printk_debug("%s found\n", dev_path(dev_mc));
+ pci_domain = dev_mc->bus->dev;
+ if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
+ if((pci_domain->links==1) && (pci_domain->link[0].children == dev_mc)) {
+ printk_debug("%s move to ",dev_path(dev_mc));
+ dev_mc->bus->secondary = CBB; // move to 0xff
+ printk_debug("%s\n",dev_path(dev_mc));
+ while(dev_mc){
+ printk_debug("%s move to ",dev_path(dev_mc));
+ dev_mc->path.u.pci.devfn -= PCI_DEVFN(0x18,0);
+ printk_debug("%s\n",dev_path(dev_mc));
+ dev_mc = dev_mc->sibling;
+ }
+ }
+ }
+ }
+ }
+
+#endif
+
+ dev_mc = dev_find_slot(CBB, PCI_DEVFN(CDB, 0));
+ if (!dev_mc) {
+ printk_err("%02x:%02x.0 not found", CBB, CDB);
+ die("");
+ }
+
+ sysconf_init(dev_mc);
+
+ nodes = sysconf.nodes;
+
+#if CBB && (NODE_NUMS > 32)
+	if(nodes>32) { // nodes 32 to 63 need to go on bus 0xfe (CBB - 1)
+ if(pci_domain->links==1) {
+ pci_domain->links++; // from 1 to 2
+ pci_domain->link[1].link = 1;
+ pci_domain->link[1].dev = pci_domain;
+ pci_domain->link[1].children = 0;
+ printk_debug("%s links increase to %d\n", dev_path(pci_domain), pci_domain->links);
+ }
+ pci_domain->link[1].secondary = CBB - 1;
+ }
+#endif
+ /* Find which cpus are present */
+ cpu_bus = &dev->link[0];
+ for(i = 0; i < nodes; i++) {
+ device_t dev, cpu;
+ struct device_path cpu_path;
+ unsigned busn, devn;
+ struct bus *pbus;
+
+ busn = CBB;
+ devn = CDB+i;
+ pbus = dev_mc->bus;
+#if CBB && (NODE_NUMS > 32)
+ if(i>=32) {
+ busn--;
+ devn-=32;
+ pbus = &(pci_domain->link[1]);
+ }
+#endif
+
+ /* Find the cpu's pci device */
+ dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
+ if (!dev) {
+ /* If I am probing things in a weird order
+ * ensure all of the cpu's pci devices are found.
+ */
+ int j;
+ for(j = 0; j <= 5; j++) { //FBDIMM?
+ dev = pci_probe_dev(NULL, pbus,
+ PCI_DEVFN(devn, j));
+ }
+ dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
+ }
+ if(dev) {
+			/* We need to set up the links for this device;
+			 * otherwise the devices behind it will not be scanned.
+			 */
+ int j;
+ int linknum;
+#if HT3_SUPPORT==1
+ linknum = 8;
+#else
+ linknum = 4;
+#endif
+ if(dev->links < linknum) {
+ for(j=dev->links; j<linknum; j++) {
+ dev->link[j].link = j;
+ dev->link[j].dev = dev;
+ }
+ dev->links = linknum;
+ printk_debug("%s links increase to %d\n", dev_path(dev), dev->links);
+ }
+ }
+
+ cores_found = 0; // one core
+ dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
+ if (dev && dev->enabled) {
+ j = pci_read_config32(dev, 0xe8);
+ cores_found = (j >> 12) & 3; // dev is func 3
+ printk_debug(" %s siblings=%d\n", dev_path(dev), cores_found);
+ }
+
+ u32 jj;
+ if(disable_siblings) {
+ jj = 0;
+ } else
+ {
+ jj = cores_found;
+ }
+
+ for (j = 0; j <=jj; j++ ) {
+
+ /* Build the cpu device path */
+ cpu_path.type = DEVICE_PATH_APIC;
+ cpu_path.u.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
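+			/* With nb_cfg_54 set and siblings == 3 (quad core)
+			 * this is node*4 + core, e.g. node 2 core 1 gets
+			 * APIC ID 9; with nb_cfg_54 clear it is node + core*64. */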
+
+ /* See if I can find the cpu */
+ cpu = find_dev_path(cpu_bus, &cpu_path);
+
+ /* Enable the cpu if I have the processor */
+ if (dev && dev->enabled) {
+ if (!cpu) {
+ cpu = alloc_dev(cpu_bus, &cpu_path);
+ }
+ if (cpu) {
+ cpu->enabled = 1;
+ }
+ }
+
+ /* Disable the cpu if I don't have the processor */
+ if (cpu && (!dev || !dev->enabled)) {
+ cpu->enabled = 0;
+ }
+
+ /* Report what I have done */
+ if (cpu) {
+ cpu->path.u.apic.node_id = i;
+ cpu->path.u.apic.core_id = j;
+ #if (ENABLE_APIC_EXT_ID == 1) && (APIC_ID_OFFSET>0)
+ if(sysconf.enabled_apic_ext_id) {
+ if(sysconf.lift_bsp_apicid) {
+ cpu->path.u.apic.apic_id += sysconf.apicid_offset;
+ } else
+ {
+ if (cpu->path.u.apic.apic_id != 0)
+ cpu->path.u.apic.apic_id += sysconf.apicid_offset;
+ }
+ }
+ #endif
+ printk_debug("CPU: %s %s\n",
+ dev_path(cpu), cpu->enabled?"enabled":"disabled");
+ }
+
+ } //j
+ }
+ return max;
+}
+
+
+static void cpu_bus_init(device_t dev)
+{
+ initialize_cpus(&dev->link[0]);
+}
+
+
+static void cpu_bus_noop(device_t dev)
+{
+}
+
+
+static struct device_operations cpu_bus_ops = {
+ .read_resources = cpu_bus_noop,
+ .set_resources = cpu_bus_noop,
+ .enable_resources = cpu_bus_noop,
+ .init = cpu_bus_init,
+ .scan_bus = cpu_bus_scan,
+};
+
+
+static void root_complex_enable_dev(struct device *dev)
+{
+ /* Set the operations if it is a special bus type */
+ if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
+ dev->ops = &pci_domain_ops;
+ }
+ else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
+ dev->ops = &cpu_bus_ops;
+ }
+}
+
+struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
+ CHIP_NAME("AMD FAM10 Root Complex")
+ .enable_dev = root_complex_enable_dev,
+};
diff --git a/src/northbridge/amd/amdfam10/northbridge.h b/src/northbridge/amd/amdfam10/northbridge.h
new file mode 100644
index 0000000000..b385e3df82
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/northbridge.h
@@ -0,0 +1,25 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef NORTHBRIDGE_AMD_AMDFAM10_H
+#define NORTHBRIDGE_AMD_AMDFAM10_H
+
+extern u32 amdfam10_scan_root_bus(device_t root, u32 max);
+
+#endif /* NORTHBRIDGE_AMD_AMDFAM10_H */
diff --git a/src/northbridge/amd/amdfam10/raminit.h b/src/northbridge/amd/amdfam10/raminit.h
new file mode 100644
index 0000000000..36ccf77cd8
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/raminit.h
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef RAMINIT_H
+#define RAMINIT_H
+
+#if 0
+#if DIMM_SUPPORT==0x0110
+//FBDIMM REG
+/* each channel can have 8 fbdimm */
+#define DIMM_SOCKETS 8
+struct mem_controller {
+ u32 node_id;
+ device_t f0, f1, f2, f3, f4, f5;
+ /* channelA, channelB belong to DCT0,
+ * channelC, channelD belong to DCT1
+ * Each DCT may support one ganged logical FBDIMM ---> 128 bit
+ * or a single unganged channel --->64 bit
+ * a DCT can not support 2 unganged channels
+ * two DCTs can not be ganged
+ */
+ u8 spd_switch_addr;
+ u8 spd_addr[DIMM_SOCKETS*4];
+};
+
+#endif
+#endif
+
+//#if (DIMM_SUPPORT & 0x00ff)==0x0004
+//DDR2 registered and unbuffered: Socket F 1207 and AM3
+/* Each channel has 4 DDR2 DIMMs for socket F,
+ * 2 for socket M2/M3,
+ * and 1 for socket s1g1.
+ */
+#define DIMM_SOCKETS 4
+struct mem_controller {
+ u32 node_id;
+ device_t f0, f1, f2, f3, f4, f5;
+ /* channel0 is DCT0 --- channelA
+ * channel1 is DCT1 --- channelB
+	 * They can be ganged into a single dual-channel DCT ---> 128 bit,
+	 * or unganged into two single-channel DCTs ---> 64 bit.
+	 * When the DCTs are ganged, writes to the DCT1 set of registers
+	 * (F2x1XX) are ignored and reads return all 0's.
+	 * The exception is the DCT phy registers, F2x[1,0]98, F2x[1,0]9C,
+	 * and all the associated indexed registers, which are still
+	 * independently accessible.
+ */
+	/* FIXME: only ganged mode is supported for now, for simplicity */
+ u8 spd_switch_addr;
+ u8 spd_addr[DIMM_SOCKETS*2];
+};
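+/* spd_addr[] appears to hold the channel A (DCT0) DIMM SMBus addresses in
+ * entries 0..DIMM_SOCKETS-1 and the channel B (DCT1) addresses in entries
+ * DIMM_SOCKETS..2*DIMM_SOCKETS-1; mctGet_DIMMAddr() in raminit_amdmct.c
+ * interleaves the two halves into the per-node DIMMAddr[] table. */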
+
+//#endif
+
+
+#endif
diff --git a/src/northbridge/amd/amdfam10/raminit_amdmct.c b/src/northbridge/amd/amdfam10/raminit_amdmct.c
new file mode 100644
index 0000000000..05446331e7
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/raminit_amdmct.c
@@ -0,0 +1,155 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static void print_raminit(const char *strval, u32 val)
+{
+ printk_debug("%s%08x\n", strval, val);
+}
+
+
+#define RAMINIT_DEBUG 1
+
+
+static void print_tx(const char *strval, u32 val)
+{
+#if RAMINIT_DEBUG == 1
+ print_raminit(strval, val);
+#endif
+}
+
+
+static void print_t(const char *strval)
+{
+#if RAMINIT_DEBUG == 1
+ print_debug(strval);
+#endif
+}
+#include "amdfam10.h"
+#include "../amdmct/wrappers/mcti.h"
+#include "../amdmct/amddefs.h"
+#include "../amdmct/mct/mct_d.h"
+#include "../amdmct/mct/mct_d_gcc.h"
+
+#include "../amdmct/wrappers/mcti_d.c"
+#include "../amdmct/mct/mct_d.c"
+
+
+#include "../amdmct/mct/mctmtr_d.c"
+#include "../amdmct/mct/mctcsi_d.c"
+#include "../amdmct/mct/mctecc_d.c"
+#include "../amdmct/mct/mctpro_d.c"
+#include "../amdmct/mct/mctdqs_d.c"
+#include "../amdmct/mct/mctsrc.c"
+#include "../amdmct/mct/mctsrc1p.c"
+#include "../amdmct/mct/mcttmrl.c"
+#include "../amdmct/mct/mcthdi.c"
+#include "../amdmct/mct/mctndi_d.c"
+#include "../amdmct/mct/mctchi_d.c"
+
+#if SYSTEM_TYPE == SERVER
+//L1
+#include "../amdmct/mct/mctardk3.c"
+#elif SYSTEM_TYPE == DESKTOP
+//AM2
+#include "../amdmct/mct/mctardk4.c"
+//#elif SYSTEM_TYPE == MOBILE
+//s1g1
+//#include "../amdmct/mct/mctardk5.c"
+#endif
+
+#include "../amdmct/mct/mct_fd.c"
+
+int mctRead_SPD(u32 smaddr, u32 reg)
+{
+ return spd_read_byte(smaddr, reg);
+}
+
+
+void mctSMBhub_Init(u32 node)
+{
+ struct sys_info *sysinfo = (struct sys_info *)(DCACHE_RAM_BASE + DCACHE_RAM_SIZE - DCACHE_RAM_GLOBAL_VAR_SIZE);
+ struct mem_controller *ctrl = &( sysinfo->ctrl[node] );
+ activate_spd_rom(ctrl);
+}
+
+
+void mctGet_DIMMAddr(struct DCTStatStruc *pDCTstat, u32 node)
+{
+ int j;
+ struct sys_info *sysinfo = (struct sys_info *)(DCACHE_RAM_BASE + DCACHE_RAM_SIZE - DCACHE_RAM_GLOBAL_VAR_SIZE);
+ struct mem_controller *ctrl = &( sysinfo->ctrl[node] );
+
+ for(j=0;j<DIMM_SOCKETS;j++) {
+ pDCTstat->DIMMAddr[j*2] = ctrl->spd_addr[j] & 0xff;
+ pDCTstat->DIMMAddr[j*2+1] = ctrl->spd_addr[DIMM_SOCKETS + j] & 0xff;
+ }
+
+}
+
+
+u32 mctGetLogicalCPUID(u32 Node)
+{
+ u32 dev;
+ u32 val, valx;
+ u32 family, model, stepping;
+ u32 ret;
+ dev = PA_NBMISC(Node);
+ val = Get_NB32(dev, 0xfc);
+ print_debug("Family_Model:"); print_debug_hex32(val); print_debug("\n");
+
+ family = ((val >> 8) & 0x0f) + ((val>>20) & 0xff);
+ model = ((val>>4) & 0x0f) | ((val>>(16-4)) & 0xf0);
+ stepping = val & 0xff;
+ print_debug("Family:"); print_debug_hex8(family); print_debug("\t");
+ print_debug("Model:"); print_debug_hex8(model); print_debug("\t");
+ print_debug("Stepping:"); print_debug_hex8(stepping); print_debug("\n");
+
+ valx = (family<<12) | (model<<4) | (stepping);
+ print_debug("converted:"); print_debug_hex32(valx); print_debug("\n");
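+	/* For example, a rev A2 part would report 0x00100f02 here, which the
+	 * shifts above fold into family 0x10, model 0, stepping 2, i.e.
+	 * valx == 0x10002 and AMD_DR_A2 below. */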
+
+ switch (valx) {
+ case 0x10000:
+ ret = AMD_DR_A0A;
+ break;
+ case 0x10001:
+ ret = AMD_DR_A1B;
+ break;
+ case 0x10002:
+ ret = AMD_DR_A2;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+
+void raminit_amdmct(struct sys_info *sysinfo)
+{
+ struct MCTStatStruc *pMCTstat = &(sysinfo->MCTstat);
+ struct DCTStatStruc *pDCTstatA = sysinfo->DCTstatA;
+
+ print_debug("raminit_amdmct begin:\n");
+
+ mctAutoInitMCT_D(pMCTstat, pDCTstatA);
+
+ print_debug("raminit_amdmct end:\n");
+}
diff --git a/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c b/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c
new file mode 100644
index 0000000000..643c32c927
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c
@@ -0,0 +1,81 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static void set_htic_bit(u8 i, u32 val, u8 bit)
+{
+ u32 dword;
+ dword = pci_read_config32(NODE_PCI(i, 0), HT_INIT_CONTROL);
+ dword &= ~(1<<bit);
+ dword |= ((val & 1) <<bit);
+ pci_write_config32(NODE_PCI(i, 0), HT_INIT_CONTROL, dword);
+}
+
+
+static u32 get_htic_bit(u8 i, u8 bit)
+{
+ u32 dword;
+ dword = pci_read_config32(NODE_PCI(i, 0), HT_INIT_CONTROL);
+ dword &= (1<<bit);
+ return dword;
+}
+
+static void wait_till_sysinfo_in_ram(void)
+{
+ while(1) {
+		/* Give the NB a break: many CPUs spinning on one bit generate
+		 * a lot of traffic, and timing is not critical for the APs.
+		 */
+ udelay_tsc(1000);
+ if(get_htic_bit(0, 9)) return;
+ }
+}
+
+static void set_sysinfo_in_ram(u32 val)
+{
+ set_htic_bit(0, val, 9);
+}
+
+static void fill_mem_ctrl(u32 controllers, struct mem_controller *ctrl_a, const u8 *spd_addr)
+{
+ int i;
+ int j;
+ int index = 0;
+ struct mem_controller *ctrl;
+ for(i=0;i<controllers; i++) {
+ ctrl = &ctrl_a[i];
+ ctrl->node_id = i;
+ ctrl->f0 = NODE_PCI(i, 0);
+ ctrl->f1 = NODE_PCI(i, 1);
+ ctrl->f2 = NODE_PCI(i, 2);
+ ctrl->f3 = NODE_PCI(i, 3);
+ ctrl->f4 = NODE_PCI(i, 4);
+ ctrl->f5 = NODE_PCI(i, 5);
+
+ if(spd_addr == (void *)0) continue;
+
+ ctrl->spd_switch_addr = spd_addr[index++];
+
+ for(j=0; j < 8; j++) {
+ ctrl->spd_addr[j] = spd_addr[index++];
+
+ }
+ }
+}
+
diff --git a/src/northbridge/amd/amdfam10/reset_test.c b/src/northbridge/amd/amdfam10/reset_test.c
new file mode 100644
index 0000000000..4298d6549f
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/reset_test.c
@@ -0,0 +1,169 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <cpu/x86/lapic.h>
+
+#define NODE_ID 0x60
+#define HT_INIT_CONTROL 0x6c
+#define HTIC_ColdR_Detect (1<<4)
+#define HTIC_BIOSR_Detect (1<<5)
+#define HTIC_INIT_Detect (1<<6)
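+/* How these bits are used below (distinguish_cpu_resets() sets all three):
+ *   ColdR_Detect clear                   -> cold reset
+ *   ColdR_Detect set, BIOSR_Detect clear -> warm reset requested by the BIOS
+ *                                           (set_bios_reset() clears BIOSR)
+ *   ColdR_Detect set, BIOSR_Detect set   -> some other warm reset
+ *   INIT_Detect                          -> this CPU saw an INIT
+ */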
+
+/* mmconf is not ready */
+/* io_ext is not ready */
+static u32 cpu_init_detected(u8 nodeid)
+{
+ u32 htic;
+ device_t dev;
+
+ dev = NODE_PCI(nodeid, 0);
+ htic = pci_io_read_config32(dev, HT_INIT_CONTROL);
+
+ return !!(htic & HTIC_INIT_Detect);
+}
+
+static u32 bios_reset_detected(void)
+{
+ u32 htic;
+ htic = pci_io_read_config32(PCI_DEV(CBB, CDB, 0), HT_INIT_CONTROL);
+
+ return (htic & HTIC_ColdR_Detect) && !(htic & HTIC_BIOSR_Detect);
+}
+
+static u32 cold_reset_detected(void)
+{
+ u32 htic;
+ htic = pci_io_read_config32(PCI_DEV(CBB, CDB, 0), HT_INIT_CONTROL);
+
+ return !(htic & HTIC_ColdR_Detect);
+}
+
+static u32 other_reset_detected(void) // other warm reset not started by BIOS
+{
+ u32 htic;
+ htic = pci_io_read_config32(PCI_DEV(CBB, CDB, 0), HT_INIT_CONTROL);
+
+ return (htic & HTIC_ColdR_Detect) && (htic & HTIC_BIOSR_Detect);
+}
+
+static void distinguish_cpu_resets(u8 nodeid)
+{
+ u32 htic;
+ device_t device;
+ device = NODE_PCI(nodeid, 0);
+ htic = pci_io_read_config32(device, HT_INIT_CONTROL);
+ htic |= HTIC_ColdR_Detect | HTIC_BIOSR_Detect | HTIC_INIT_Detect;
+ pci_io_write_config32(device, HT_INIT_CONTROL, htic);
+}
+
+static u32 warm_reset_detect(u8 nodeid)
+{
+ u32 htic;
+ device_t device;
+ device = NODE_PCI(nodeid, 0);
+ htic = pci_io_read_config32(device, HT_INIT_CONTROL);
+ return (htic & HTIC_ColdR_Detect) && !(htic & HTIC_BIOSR_Detect);
+}
+
+static void set_bios_reset(void)
+{
+
+ u32 nodes;
+ u32 htic;
+ device_t dev;
+ int i;
+
+ nodes = ((pci_read_config32(PCI_DEV(CBB, CDB, 0), 0x60) >> 4) & 7) + 1;
+
+ for(i = 0; i < nodes; i++) {
+ dev = NODE_PCI(i,0);
+ htic = pci_read_config32(dev, HT_INIT_CONTROL);
+ htic &= ~HTIC_BIOSR_Detect;
+ pci_write_config32(dev, HT_INIT_CONTROL, htic);
+ }
+}
+
+
+/* Look up which bus a given node/link combination is on.
+ * Return 0 when we can't find the answer.
+ */
+static u8 node_link_to_bus(u8 node, u8 link) // node is 6 bits, link is 3 bits
+{
+ u32 reg;
+ u32 val;
+
+ // put node and link in correct bit
+ val = ((node & 0x0f)<<4) | ((node & 0x30)<< (12-4)) | ((link & 0x07)<<8) ;
+
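+	/* This packs the node's low four bits into val[7:4], node bits [5:4]
+	 * into val[13:12] and the link into val[10:8], matching the layout
+	 * compared against below. E.g. node 5, link 2 -> val = 0x250, which
+	 * matches a config map of 0x00400253 and returns bus 0x40. */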
+ for(reg = 0xE0; reg < 0xF0; reg += 0x04) {
+ u32 config_map;
+ config_map = pci_io_read_config32(PCI_DEV(CBB, CDB, 1), reg);
+ if ((config_map & 3) != 3) {
+ continue;
+ }
+ if ((config_map & (((63 & 0x0f)<<4) | ((63 & 0x30)<< (12-4)) | ((7 & 0x07)<<8) )
+ ) == val )
+ {
+ return (config_map >> 16) & 0xff;
+ }
+ }
+
+#if EXT_CONF_SUPPORT == 1
+	// check the extended config map space as well:
+	// use the node's extended config map registers to find the bus for this link
+ u32 tempreg;
+ int i;
+ int j;
+ u32 cfg_map_dest;
+ device_t dev;
+
+ cfg_map_dest = (1<<7)|(1<<6)|link;
+
+	// three cases: index_min==index_max, index_min+1==index_max, index_min+1<index_max
+ dev = NODE_PCI(node, 1);
+ for(j=0; j<64; j++) {
+ pci_io_write_config32(dev, 0x110, j | (6<<28));
+ tempreg = pci_io_read_config32(dev, 0x114);
+ for(i=0; i<=3; i++) {
+ tempreg >>= (i*8);
+ if((tempreg & ((1<<7)|(1<<6)|0x3f)) == cfg_map_dest) {
+ return (i+(j<<2)); //busn_min
+ }
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static u32 get_sblk(void)
+{
+ u32 reg;
+	/* read PCI_DEV(CBB,CDB,0) 0x64 bits [9:8] to find out SbLink */
+ reg = pci_io_read_config32(PCI_DEV(CBB, CDB, 0), 0x64);
+ return ((reg>>8) & 3) ;
+}
+
+
+static u8 get_sbbusn(u8 sblk)
+{
+ return node_link_to_bus(0, sblk);
+}
+
diff --git a/src/northbridge/amd/amdfam10/resourcemap.c b/src/northbridge/amd/amdfam10/resourcemap.c
new file mode 100644
index 0000000000..a4f0e92d3b
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/resourcemap.c
@@ -0,0 +1,287 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+static void setup_default_resource_map(void)
+{
+ static const u32 register_values[] = {
+ /* Careful set limit registers before base registers which contain
+ the enables */
+ /* DRAM Limit i Registers
+ * F1:0x44 i = 0
+ * F1:0x4C i = 1
+ * F1:0x54 i = 2
+ * F1:0x5C i = 3
+ * F1:0x64 i = 4
+ * F1:0x6C i = 5
+ * F1:0x74 i = 6
+ * F1:0x7C i = 7
+ * [ 2: 0] Destination Node ID
+ * 000 = Node 0
+ * 001 = Node 1
+ * 010 = Node 2
+ * 011 = Node 3
+ * 100 = Node 4
+ * 101 = Node 5
+ * 110 = Node 6
+ * 111 = Node 7
+ * [ 7: 3] Reserved
+ * [10: 8] Interleave select
+ * specifies the values of A[14:12] to use with
+ * interleave enable.
+ * [15:11] Reserved
+ * [31:16] DRAM Limit Address i Bits 39-24
+ * This field defines the upper address bits of a 40 bit
+ * address that define the end of the DRAM region.
+ */
+ PCI_ADDR(CBB, CDB, 1, 0x44), 0x0000f8f8, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x4C), 0x0000f8f8, 0x00000001,
+ PCI_ADDR(CBB, CDB, 1, 0x54), 0x0000f8f8, 0x00000002,
+ PCI_ADDR(CBB, CDB, 1, 0x5C), 0x0000f8f8, 0x00000003,
+ PCI_ADDR(CBB, CDB, 1, 0x64), 0x0000f8f8, 0x00000004,
+ PCI_ADDR(CBB, CDB, 1, 0x6C), 0x0000f8f8, 0x00000005,
+ PCI_ADDR(CBB, CDB, 1, 0x74), 0x0000f8f8, 0x00000006,
+ PCI_ADDR(CBB, CDB, 1, 0x7C), 0x0000f8f8, 0x00000007,
+ /* DRAM Base i Registers
+ * F1:0x40 i = 0
+ * F1:0x48 i = 1
+ * F1:0x50 i = 2
+ * F1:0x58 i = 3
+ * F1:0x60 i = 4
+ * F1:0x68 i = 5
+ * F1:0x70 i = 6
+ * F1:0x78 i = 7
+ * [ 0: 0] Read Enable
+ * 0 = Reads Disabled
+ * 1 = Reads Enabled
+ * [ 1: 1] Write Enable
+ * 0 = Writes Disabled
+ * 1 = Writes Enabled
+ * [ 7: 2] Reserved
+ * [10: 8] Interleave Enable
+ * 000 = No interleave
+ * 001 = Interleave on A[12] (2 nodes)
+ * 010 = reserved
+ * 011 = Interleave on A[12] and A[14] (4 nodes)
+ * 100 = reserved
+ * 101 = reserved
+ * 110 = reserved
+	 *	   111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
+ * [15:11] Reserved
+	 * [31:16] DRAM Base Address i Bits 39-24
+ * This field defines the upper address bits of a 40-bit
+ * address that define the start of the DRAM region.
+ */
+ PCI_ADDR(CBB, CDB, 1, 0x40), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x48), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x50), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x58), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x60), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x68), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x70), 0x0000f8fc, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x78), 0x0000f8fc, 0x00000000,
+
+ /* Memory-Mapped I/O Limit i Registers
+ * F1:0x84 i = 0
+ * F1:0x8C i = 1
+ * F1:0x94 i = 2
+ * F1:0x9C i = 3
+ * F1:0xA4 i = 4
+ * F1:0xAC i = 5
+ * F1:0xB4 i = 6
+ * F1:0xBC i = 7
+ * [ 2: 0] Destination Node ID
+ * 000 = Node 0
+ * 001 = Node 1
+ * 010 = Node 2
+ * 011 = Node 3
+ * 100 = Node 4
+ * 101 = Node 5
+ * 110 = Node 6
+ * 111 = Node 7
+ * [ 3: 3] Reserved
+ * [ 5: 4] Destination Link ID
+ * 00 = Link 0
+ * 01 = Link 1
+ * 10 = Link 2
+ * 11 = Reserved
+ * [ 6: 6] Reserved
+ * [ 7: 7] Non-Posted
+ * 0 = CPU writes may be posted
+ * 1 = CPU writes must be non-posted
+ * [31: 8] Memory-Mapped I/O Limit Address i (39-16)
+	 *	   This field defines the upper address bits of a 40-bit
+ * address that defines the end of a memory-mapped
+ * I/O region n
+ */
+ PCI_ADDR(CBB, CDB, 1, 0x84), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x8C), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x94), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x9C), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xA4), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xAC), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xB4), 0x00000048, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xBC), 0x00000048, 0x00ffff00,
+
+ /* Memory-Mapped I/O Base i Registers
+ * F1:0x80 i = 0
+ * F1:0x88 i = 1
+ * F1:0x90 i = 2
+ * F1:0x98 i = 3
+ * F1:0xA0 i = 4
+ * F1:0xA8 i = 5
+ * F1:0xB0 i = 6
+ * F1:0xB8 i = 7
+ * [ 0: 0] Read Enable
+ * 0 = Reads disabled
+ * 1 = Reads Enabled
+ * [ 1: 1] Write Enable
+ * 0 = Writes disabled
+ * 1 = Writes Enabled
+ * [ 2: 2] Cpu Disable
+ * 0 = Cpu can use this I/O range
+ * 1 = Cpu requests do not use this I/O range
+ * [ 3: 3] Lock
+ * 0 = base/limit registers i are read/write
+ * 1 = base/limit registers i are read-only
+ * [ 7: 4] Reserved
+ * [31: 8] Memory-Mapped I/O Base Address i (39-16)
+ * This field defines the upper address bits of a 40bit
+ * address that defines the start of memory-mapped
+ * I/O region i
+ */
+ PCI_ADDR(CBB, CDB, 1, 0x80), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x88), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x90), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0x98), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xA0), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xA8), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xB0), 0x000000f0, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xB8), 0x000000f0, 0x00fc0003,
+
+ /* PCI I/O Limit i Registers
+ * F1:0xC4 i = 0
+ * F1:0xCC i = 1
+ * F1:0xD4 i = 2
+ * F1:0xDC i = 3
+ * [ 2: 0] Destination Node ID
+ * 000 = Node 0
+ * 001 = Node 1
+ * 010 = Node 2
+ * 011 = Node 3
+ * 100 = Node 4
+ * 101 = Node 5
+ * 110 = Node 6
+ * 111 = Node 7
+ * [ 3: 3] Reserved
+ * [ 5: 4] Destination Link ID
+ * 00 = Link 0
+ * 01 = Link 1
+ * 10 = Link 2
+ * 11 = reserved
+ * [11: 6] Reserved
+ * [24:12] PCI I/O Limit Address i
+ * This field defines the end of PCI I/O region n
+ * [31:25] Reserved
+ */
+ PCI_ADDR(CBB, CDB, 1, 0xC4), 0xFE000FC8, 0x01fff000,
+ PCI_ADDR(CBB, CDB, 1, 0xCC), 0xFE000FC8, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xD4), 0xFE000FC8, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xDC), 0xFE000FC8, 0x00000000,
+
+ /* PCI I/O Base i Registers
+ * F1:0xC0 i = 0
+ * F1:0xC8 i = 1
+ * F1:0xD0 i = 2
+ * F1:0xD8 i = 3
+ * [ 0: 0] Read Enable
+ * 0 = Reads Disabled
+ * 1 = Reads Enabled
+ * [ 1: 1] Write Enable
+ * 0 = Writes Disabled
+ * 1 = Writes Enabled
+ * [ 3: 2] Reserved
+ * [ 4: 4] VGA Enable
+ * 0 = VGA matches Disabled
+	 *	   1 = matches all addresses < 64K where A[9:0] is in
+	 *	       the range 3B0-3BB or 3C0-3DF, independent of the
+	 *	       base & limit registers
+	 * [ 5: 5] ISA Enable
+	 *	   0 = ISA matches Disabled
+	 *	   1 = Blocks addresses < 64K and in the last 768 bytes of
+	 *	       each 1K block from matching against this base/limit
+ * pair
+ * [11: 6] Reserved
+ * [24:12] PCI I/O Base i
+ * This field defines the start of PCI I/O region n
+ * [31:25] Reserved
+ */
+ PCI_ADDR(CBB, CDB, 1, 0xC0), 0xFE000FCC, 0x00000003,
+ PCI_ADDR(CBB, CDB, 1, 0xC8), 0xFE000FCC, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xD0), 0xFE000FCC, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xD8), 0xFE000FCC, 0x00000000,
+
+ /* Config Base and Limit i Registers
+ * F1:0xE0 i = 0
+ * F1:0xE4 i = 1
+ * F1:0xE8 i = 2
+ * F1:0xEC i = 3
+ * [ 0: 0] Read Enable
+ * 0 = Reads Disabled
+ * 1 = Reads Enabled
+ * [ 1: 1] Write Enable
+ * 0 = Writes Disabled
+ * 1 = Writes Enabled
+ * [ 2: 2] Device Number Compare Enable
+ * 0 = The ranges are based on bus number
+ * 1 = The ranges are ranges of devices on bus 0
+ * [ 3: 3] Reserved
+ * [ 6: 4] Destination Node
+ * 000 = Node 0
+ * 001 = Node 1
+ * 010 = Node 2
+ * 011 = Node 3
+ * 100 = Node 4
+ * 101 = Node 5
+ * 110 = Node 6
+ * 111 = Node 7
+ * [ 7: 7] Reserved
+ * [ 9: 8] Destination Link
+ * 00 = Link 0
+ * 01 = Link 1
+ * 10 = Link 2
+ * 11 - Reserved
+ * [15:10] Reserved
+ * [23:16] Bus Number Base i
+ * This field defines the lowest bus number in
+ * configuration region i
+ * [31:24] Bus Number Limit i
+ * This field defines the highest bus number in
+ * configuration regin i
+	 *	   configuration region i
+ PCI_ADDR(CBB, CDB, 1, 0xE0), 0x0000FC88, 0xff000003,
+ PCI_ADDR(CBB, CDB, 1, 0xE4), 0x0000FC88, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xE8), 0x0000FC88, 0x00000000,
+ PCI_ADDR(CBB, CDB, 1, 0xEC), 0x0000FC88, 0x00000000,
+ };
+
+ u32 max;
+ max = sizeof(register_values)/sizeof(register_values[0]);
+ setup_resource_map(register_values, max);
+}
+
diff --git a/src/northbridge/amd/amdfam10/root_complex/Config.lb b/src/northbridge/amd/amdfam10/root_complex/Config.lb
new file mode 100644
index 0000000000..610e929159
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/root_complex/Config.lb
@@ -0,0 +1 @@
+config chip.h
diff --git a/src/northbridge/amd/amdfam10/root_complex/chip.h b/src/northbridge/amd/amdfam10/root_complex/chip.h
new file mode 100644
index 0000000000..666147f949
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/root_complex/chip.h
@@ -0,0 +1,24 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+struct northbridge_amd_amdfam10_root_complex_config
+{
+};
+
+extern struct chip_operations northbridge_amd_amdfam10_root_complex_ops;
diff --git a/src/northbridge/amd/amdfam10/setup_resource_map.c b/src/northbridge/amd/amdfam10/setup_resource_map.c
new file mode 100644
index 0000000000..f463a58a16
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/setup_resource_map.c
@@ -0,0 +1,231 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#define RES_DEBUG 0
+
+static void setup_resource_map(const u32 *register_values, u32 max)
+{
+ u32 i;
+// print_debug("setting up resource map....");
+
+ for(i = 0; i < max; i += 3) {
+ device_t dev;
+ u32 where;
+ u32 reg;
+
+ dev = register_values[i] & ~0xff;
+ where = register_values[i] & 0xff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2];
+ pci_write_config32(dev, where, reg);
+ }
+// print_debug("done.\n");
+}
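+/* Each register_values[] triplet is {PCI address, AND mask, OR value},
+ * applied read-modify-write: new = (old & mask) | value. For instance the
+ * DRAM limit entry {PCI_ADDR(CBB, CDB, 1, 0x4C), 0x0000f8f8, 0x00000001} in
+ * resourcemap.c keeps only the reserved bits of F1x4C, clears the limit and
+ * interleave select, and sets the destination node ID to 1. */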
+
+
+static void setup_resource_map_offset(const u32 *register_values,
+ u32 max, u32 offset_pci_dev,
+ u32 offset_io_base)
+{
+ u32 i;
+// print_debug("setting up resource map offset....");
+ for(i = 0; i < max; i += 3) {
+ device_t dev;
+ u32 where;
+ unsigned long reg;
+ dev = (register_values[i] & ~0xfff) + offset_pci_dev;
+ where = register_values[i] & 0xfff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2] + offset_io_base;
+ pci_write_config32(dev, where, reg);
+ }
+// print_debug("done.\n");
+}
+
+#define RES_PCI_IO 0x10
+#define RES_PORT_IO_8 0x22
+#define RES_PORT_IO_32 0x20
+#define RES_MEM_IO 0x40
+
+static void setup_resource_map_x_offset(const u32 *register_values, u32 max,
+ u32 offset_pci_dev, u32 offset_io_base)
+{
+ u32 i;
+
+#if RES_DEBUG
+	print_debug("setting up resource map ex offset....\n");
+#endif
+ for(i = 0; i < max; i += 4) {
+#if RES_DEBUG
+ printk_debug("%04x: %02x %08x <- & %08x | %08x\n",
+ i/4, register_values[i],
+ register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0),
+ register_values[i+2],
+ register_values[i+3] + ( ( (register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
+ );
+#endif
+ switch (register_values[i]) {
+ case RES_PCI_IO: //PCI
+ {
+ device_t dev;
+ u32 where;
+ u32 reg;
+ dev = (register_values[i+1] & ~0xfff) + offset_pci_dev;
+ where = register_values[i+1] & 0xfff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ pci_write_config32(dev, where, reg);
+ }
+ break;
+ case RES_PORT_IO_8: // io 8
+ {
+ u32 where;
+ u32 reg;
+ where = register_values[i+1] + offset_io_base;
+ reg = inb(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outb(reg, where);
+ }
+ break;
+ case RES_PORT_IO_32: //io32
+ {
+ u32 where;
+ u32 reg;
+ where = register_values[i+1] + offset_io_base;
+ reg = inl(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outl(reg, where);
+ }
+ break;
+ } // switch
+
+
+ }
+
+#if RES_DEBUG
+ print_debug("done.\n");
+#endif
+}
+static void setup_resource_map_x(const u32 *register_values, u32 max)
+{
+ u32 i;
+
+#if RES_DEBUG
+ print_debug("setting up resource map ex offset....");
+#endif
+
+#if RES_DEBUG
+ print_debug("\n");
+#endif
+ for(i = 0; i < max; i += 4) {
+#if RES_DEBUG
+ printk_debug("%04x: %02x %08x <- & %08x | %08x\n",
+ i/4, register_values[i],register_values[i+1], register_values[i+2], register_values[i+3]);
+#endif
+ switch (register_values[i]) {
+ case RES_PCI_IO: //PCI
+ {
+ device_t dev;
+ u32 where;
+ u32 reg;
+ dev = register_values[i+1] & ~0xff;
+ where = register_values[i+1] & 0xff;
+ reg = pci_read_config32(dev, where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ pci_write_config32(dev, where, reg);
+ }
+ break;
+ case RES_PORT_IO_8: // io 8
+ {
+ u32 where;
+ u32 reg;
+ where = register_values[i+1];
+ reg = inb(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outb(reg, where);
+ }
+ break;
+ case RES_PORT_IO_32: //io32
+ {
+ u32 where;
+ u32 reg;
+ where = register_values[i+1];
+ reg = inl(where);
+ reg &= register_values[i+2];
+ reg |= register_values[i+3];
+ outl(reg, where);
+ }
+ break;
+ } // switch
+
+
+ }
+
+#if RES_DEBUG
+ print_debug("done.\n");
+#endif
+}
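+
+/*
+ * Usage sketch (illustrative, with assumptions): the "_x" flavors consume
+ * quads of { type, address, AND mask, OR bits }. A hypothetical table that
+ * mixes a PCI config update with an 8-bit port I/O update, again assuming the
+ * romcc-style PCI_DEV(bus, dev, fn) encoding:
+ *
+ * static const u32 example_map_x[] = {
+ * RES_PCI_IO, PCI_DEV(0, 0x18, 1) | 0x40, 0x0000f8f8, 0x00000003,
+ * RES_PORT_IO_8, 0x00000cd6, 0x00000000, 0x000000aa, // hypothetical index port
+ * };
+ * setup_resource_map_x(example_map_x, 8); // max = number of u32 entries
+ */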
+
+static void setup_iob_resource_map(const u32 *register_values, u32 max)
+{
+ u32 i;
+
+ for(i = 0; i < max; i += 3) {
+ u32 where;
+ u32 reg;
+
+ where = register_values[i];
+ reg = inb(where);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2];
+ outb(reg, where);
+ }
+}
+
+static void setup_io_resource_map(const u32 *register_values, u32 max)
+{
+ u32 i;
+
+ for(i = 0; i < max; i += 3) {
+ u32 where;
+ u32 reg;
+
+ where = register_values[i];
+ reg = inl(where);
+ reg &= register_values[i+1];
+ reg |= register_values[i+2];
+
+ outl(reg, where);
+ }
+}
+
+
diff --git a/src/northbridge/amd/amdfam10/spd_ddr2.h b/src/northbridge/amd/amdfam10/spd_ddr2.h
new file mode 100644
index 0000000000..176f76adee
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/spd_ddr2.h
@@ -0,0 +1,88 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* SPDs for DDR2 SDRAM */
+#define SPD_MEM_TYPE 2
+ #define SPD_MEM_TYPE_SDRAM_DDR 0x07
+ #define SPD_MEM_TYPE_SDRAM_DDR2 0x08
+
+#define SPD_DIMM_TYPE 20 /* x: bit 0 or bit 4 = 1 means registered */
+ #define SPD_DIMM_TYPE_RDIMM (1<<0)
+ #define SPD_DIMM_TYPE_UDIMM (1<<1)
+ #define SPD_DIMM_TYPE_SODIMM (1<<2)
+ #define SPD_DIMM_TYPE_uDIMM (1<<3)
+ #define SPD_DIMM_TYPE_mRDIMM (1<<4)
+ #define SPD_DIMM_TYPE_mUDIMM (1<<5)
+#define SPD_MOD_ATTRIB 21
+ #define SPD_MOD_ATTRIB_DIFCK 0x20
+ #define SPD_MOD_ATTRIB_REGADC 0x11 /* x */
+ #define SPD_MOD_ATTRIB_PROBE 0x40
+
+#define SPD_DEV_ATTRIB 22 /* Device attributes --- general */
+#define SPD_DIMM_CONF_TYPE 11
+ #define SPD_DIMM_CONF_TYPE_ECC 0x02
+ #define SPD_DIMM_CONF_TYPE_ADDR_PARITY 0x04 /* ? */
+
+#define SPD_ROW_NUM 3 /* Number of Row addresses */
+#define SPD_COL_NUM 4 /* Number of Column addresses */
+#define SPD_BANK_NUM 17 /* SDRAM device attributes - number of banks on the
+ SDRAM device; it can be 4 or 8, so the bank
+ address lines would be 2 or 3 */
+
+/* Number of Ranks bit [2:0], Package (bit4, 1=stack, 0=planar), Height bit[7:5] */
+#define SPD_MOD_ATTRIB_RANK 5
+ #define SPD_MOD_ATTRIB_RANK_NUM_SHIFT 0
+ #define SPD_MOD_ATTRIB_RANK_NUM_MASK 0x07
+ #define SPD_MOD_ATTRIB_RANK_NUM_BASE 1
+ #define SPD_MOD_ATTRIB_RANK_NUM_MIN 1
+ #define SPD_MOD_ATTRIB_RANK_NUM_MAX 8
+
+#define SPD_RANK_SIZE 31 /* Only one bit is set */
+ #define SPD_RANK_SIZE_1GB (1<<0)
+ #define SPD_RANK_SIZE_2GB (1<<1)
+ #define SPD_RANK_SIZE_4GB (1<<2)
+ #define SPD_RANK_SIZE_8GB (1<<3)
+ #define SPD_RANK_SIZE_16GB (1<<4)
+ #define SPD_RANK_SIZE_128MB (1<<5)
+ #define SPD_RANK_SIZE_256MB (1<<6)
+ #define SPD_RANK_SIZE_512MB (1<<7)
+
+#define SPD_DATA_WIDTH 6 /* valid value 0, 32, 33, 36, 64, 72, 80, 128, 144, 254, 255 */
+#define SPD_PRI_WIDTH 13 /* Primary SDRAM Width, it could be 0x08 or 0x10 */
+#define SPD_ERR_WIDTH 14 /* Error Checking SDRAM Width, it could be 0x08 or 0x10 */
+
+#define SPD_CAS_LAT 18 /* SDRAM Device Attributes -- CAS Latency */
+ #define SPD_CAS_LAT_2 (1<<2)
+ #define SPD_CAS_LAT_3 (1<<3)
+ #define SPD_CAS_LAT_4 (1<<4)
+ #define SPD_CAS_LAT_5 (1<<5)
+ #define SPD_CAS_LAT_6 (1<<6)
+
+#define SPD_TRP 27 /* bits [7:2] = 1-63 ns, bits [1:0] = 0.25 ns steps; final value = ((val>>2) + (val & 3) * 0.25) ns */
+#define SPD_TRRD 28
+#define SPD_TRCD 29
+#define SPD_TRAS 30
+#define SPD_TWR 36 /* x */
+#define SPD_TWTR 37 /* x */
+#define SPD_TRTP 38 /* x */
+
+#define SPD_TRC 41 /* extended by byte 40 bits [3:1]: final value = val41 + table[(val40>>1) & 0x7], table[] = {0, 0.25, 0.33, 0.5, 0.75, 0, 0} */
+#define SPD_TRFC 42 /* extended by byte 40 bits [6:4]: final value = val42 + table[(val40>>4) & 0x7] + (val40 & 1)*256 */
+
+#define SPD_TREF 12
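+
+/*
+ * Example (illustrative sketch): decoding SPD byte 27 (SPD_TRP) into 1/4 ns
+ * units, following the encoding noted above (bits [7:2] whole ns, bits [1:0]
+ * quarter-ns steps). The SPD byte accessor is hypothetical.
+ *
+ * u8 val = read_spd_byte(SPD_TRP); // hypothetical accessor
+ * u32 trp_quarter_ns = (u32)(val >> 2) * 4 + (val & 3); // tRP in 0.25ns units
+ */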
diff --git a/src/northbridge/amd/amdfam10/ssdt.dsl b/src/northbridge/amd/amdfam10/ssdt.dsl
new file mode 100644
index 0000000000..dae5bb87cd
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/ssdt.dsl
@@ -0,0 +1,346 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Make sure the HC_NUMS and HC_POSSIBLE_NUM settings are consistent with this file
+ */
+
+DefinitionBlock ("SSDT.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925440)
+{
+ /*
+ * These objects were referenced but not defined in this table
+ */
+ External (\_SB_.PCI0, DeviceObj)
+
+ Scope (\_SB.PCI0)
+ {
+ Name (BUSN, Package (0x20) /* HC_NUMS */
+ {
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x10101010,
+ 0x11111111,
+ 0x12121212,
+ 0x13131313,
+ 0x14141414,
+ 0x15151515,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc
+ })
+ Name (MMIO, Package (0x80) /* HC_NUMS * 4 */
+ {
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x11111111,
+ 0x22222222,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x11111111,
+ 0x22222222,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x11111111,
+ 0x22222222,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x11111111,
+ 0x22222222,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x11111111,
+ 0x22222222,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888
+ })
+ Name (PCIO, Package (0x40) /* HC_NUMS * 2 */
+ {
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0xaaaaaaaa,
+ 0xbbbbbbbb,
+ 0xcccccccc,
+ 0xdddddddd,
+ 0xeeeeeeee,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x99999999,
+ 0xaaaaaaaa,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444
+ })
+ Name (SBLK, 0x11)
+ Name (TOM1, 0xaaaaaaaa)
+ Name (SBDN, 0xbbbbbbbb)
+ Name (HCLK, Package (0x20) /* HC_POSSIBLE_NUM */
+ {
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888
+ })
+ Name (HCDN, Package (0x20) /* HC_POSSIBLE_NUM */
+ {
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888,
+ 0x11111111,
+ 0x22222222,
+ 0x33333333,
+ 0x44444444,
+ 0x55555555,
+ 0x66666666,
+ 0x77777777,
+ 0x88888888
+ })
+ Name (CBB, 0x99)
+ Name (CBST, 0x88)
+ Name (CBB2, 0x77)
+ Name (CBS2, 0x66)
+
+ }
+}
+
diff --git a/src/northbridge/amd/amdfam10/sspr1.dsl b/src/northbridge/amd/amdfam10/sspr1.dsl
new file mode 100644
index 0000000000..b0bc3cb252
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/sspr1.dsl
@@ -0,0 +1,39 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+DefinitionBlock ("SSPR.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925441)
+{
+ Scope (\_SB)
+ {
+ Processor (\_SB.CPAA, 0xbb, 0x120, 6) // CPU0 and 0x01 need to be updated
+ {
+ Name(_PCT, Package ()
+ {
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_CTRL
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_STATUS
+ })
+
+ Name(_PSS, Package()
+ {
+ Package(0x06) {0x1111, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ })
+ }
+
+ }
+}
diff --git a/src/northbridge/amd/amdfam10/sspr2.dsl b/src/northbridge/amd/amdfam10/sspr2.dsl
new file mode 100644
index 0000000000..d66a107afa
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/sspr2.dsl
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+DefinitionBlock ("SSPR.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925441)
+{
+ Scope (\_SB)
+ {
+ Processor (\_SB.CPAA, 0xbb, 0x120, 6) // CPU0 and 0x01 need to be updated
+ {
+ Name(_PCT, Package ()
+ {
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_CTRL
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_STATUS
+ })
+
+ Name(_PSS, Package()
+ {
+ Package(0x06) {0x1111, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x7777, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ })
+ }
+
+ }
+}
diff --git a/src/northbridge/amd/amdfam10/sspr3.dsl b/src/northbridge/amd/amdfam10/sspr3.dsl
new file mode 100644
index 0000000000..62709824b4
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/sspr3.dsl
@@ -0,0 +1,41 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+DefinitionBlock ("SSPR.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925441)
+{
+ Scope (\_SB)
+ {
+ Processor (\_SB.CPAA, 0xbb, 0x120, 6) // CPU0 and 0x01 need to be updated
+ {
+ Name(_PCT, Package ()
+ {
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_CTRL
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_STATUS
+ })
+
+ Name(_PSS, Package()
+ {
+ Package(0x06) {0x1111, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x7777, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x8888, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ })
+ }
+
+ }
+}
diff --git a/src/northbridge/amd/amdfam10/sspr4.dsl b/src/northbridge/amd/amdfam10/sspr4.dsl
new file mode 100644
index 0000000000..5634d0fd8e
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/sspr4.dsl
@@ -0,0 +1,42 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+DefinitionBlock ("SSPR.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925441)
+{
+ Scope (\_SB)
+ {
+ Processor (\_SB.CPAA, 0xbb, 0x120, 6) // CPU0 and 0x01 need to be updated
+ {
+ Name(_PCT, Package ()
+ {
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_CTRL
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_STATUS
+ })
+
+ Name(_PSS, Package()
+ {
+ Package(0x06) {0x1111, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x7777, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x8888, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x9999, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ })
+ }
+
+ }
+}
diff --git a/src/northbridge/amd/amdfam10/sspr5.dsl b/src/northbridge/amd/amdfam10/sspr5.dsl
new file mode 100644
index 0000000000..d63adff512
--- /dev/null
+++ b/src/northbridge/amd/amdfam10/sspr5.dsl
@@ -0,0 +1,43 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+DefinitionBlock ("SSPR.aml", "SSDT", 1, "AMD-FAM10", "AMD-ACPI", 100925441)
+{
+ Scope (\_SB)
+ {
+ Processor (\_SB.CPAA, 0xbb, 0x120, 6) // CPU0 and 0x01 need to be updated
+ {
+ Name(_PCT, Package ()
+ {
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_CTRL
+ ResourceTemplate() {Register (FFixedHW, 0, 0, 0)}, //PERF_STATUS
+ })
+
+ Name(_PSS, Package()
+ {
+ Package(0x06) {0x1111, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x7777, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x8888, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0x9999, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ Package(0x06) {0xaaaa, 0x222222, 0x3333, 0x4444, 0x55, 0x66 },
+ })
+ }
+
+ }
+}
diff --git a/src/northbridge/amd/amdht/AsPsDefs.h b/src/northbridge/amd/amdht/AsPsDefs.h
new file mode 100644
index 0000000000..2a5a908c71
--- /dev/null
+++ b/src/northbridge/amd/amdht/AsPsDefs.h
@@ -0,0 +1,252 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef ASPSDEFS_H
+#define ASPSDEFS_H
+
+/* AMD Platform Types */
+#define AMD_PTYPE_DSK 1
+#define AMD_PTYPE_MOB 2
+#define AMD_PTYPE_SVR 4
+#define AMD_PTYPE_DC 8
+#define AMD_PTYPE_MC 0x10
+#define AMD_PTYPE_UMA 0x20
+
+#define APIC_BAR 0x1b /* APIC_BAR register */
+#define APIC_BAR_BP 0x100 /* APIC_BAR BSP bit */
+
+#define PS_LIM_REG 0xC0010061 /* P-state Current Limit Register */
+#define PS_CUR_LIM_SHFT 4 /* P-state Current Limit shift position */
+
+#define PS_CTL_REG 0xC0010062 /* P-state Control Register */
+#define PS_CMD_MASK_OFF 0xfffffff8 /* P-state Control Register CMD Mask OFF */
+
+#define PS_STS_REG 0xC0010063 /* P-state Status Register */
+#define PS_STS_MASK 0x7 /* P-state Status Mask */
+
+#define PS_REG_BASE 0xC0010064 /* P-state Register base */
+#define PS_MAX_REG 0xC0010068 /* Maximum P-State Register */
+#define PS_MIN_REG 0xC0010064 /* Minimum P-State Register */
+
+/* P-state register offset */
+#define PS_REG0 0 /* offset for P0 */
+#define PS_REG1 1 /* offset for P1 */
+#define PS_REG2 2 /* offset for P2 */
+#define PS_REG3 3 /* offset for P3 */
+#define PS_REG4 4 /* offset for P4 */
+
+#define PS_PSDIS_MASK 0x7fffffff /* disable P-state register */
+#define PS_EN_MASK 0x80000000 /* P-state register enable mask */
+#define PS_NB_DID_MASK 0x400000 /* P-state Reg[NbDid] Mask */
+#define PS_NB_VID_M_OFF 0x01ffffff /* P-state Reg[NbVid] Mask OFF */
+#define PS_CPU_VID_M_ON 0x0fe00 /* P-state Reg[CpuVid] Mask On */
+#define PS_NB_VID_M_ON 0x0fe000000 /* P-state Reg[NbVid] Mask On */
+#define PS_CPU_VID_SHFT 9 /* P-state bit shift for CpuVid */
+#define PS_NB_VID_SHFT 25 /* P-state bit shift for NbVid */
+#define PS_BOTH_VID_OFF 0x01ff01ff /* Mask NbVid & CpuVid */
+#define PS_CPU_NB_VID_SHFT 16 /* P-state bit shift from CpuVid to NbVid */
+#define PS_NB_VID_SHFT 25 /* P-state NBVID shift */
+#define PS_DIS 0x7fffffff /* disable P-state reg */
+#define PS_EN 0x80000000 /* enable P-state reg */
+#define PS_CURDIV_SHFT 8 /* P-state Current Divisor shift position */
+#define PS_CPUDID_SHIFT 6 /* P-state CPU DID shift position */
+
+/* for unfused parts */
+#define PS_NB_VID_110V 0x48000000
+#define PS_NB_VID_1175V 0x3c000000
+/* NB VID 1.100V =0x12[PVI]=0x24[SVI] = 0100100b 7-bit code */
+
+#define PS_NB_DID0 0 /* NB DID 0 */
+#define PS_NB_DID1 0x400000 /* NB DID 1 */
+#define PS_CPU_VID_110V 0x4800 /* CPU VID 1.100V */
+#define PS_CPU_VID_1175V 0x3c00 /* CPU VID 1.175V */
+#define PS_CPU_DID 0x40 /* CPU DID 1 = divisor of 2 */
+#define PS_CPU_DID0 0 /* CPU DID 0 = divisor of 1 */
+#define PS_CPU_FID_16G 0x00 /* CPU FID of 00 = 1.6GHz */
+#define PS_CPU_FID_16G1 0x10 /* CPU FID of 16: COF = (16+16)/2 = 16 */
+#define PS_CPU_FID_18G 20 /* CPU FID of 20: COF = (20+16)/2 = 18 */
+#define PS_CPU_FID_19G 22 /* CPU FID of 22: COF = (22+16)/2 = 19 */
+#define PS_CPU_FID_20G 24 /* CPU FID of 24: COF = (24+16)/2 = 20 */
+#define PS_CPU_FID_22G 28 /* CPU FID of 28: COF = (28+16)/2 = 22 */
+#define PS_CPU_FID_30G 44 /* CPU FID of 44: COF = (44+16)/2 = 30 */
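+
+/*
+ * Example (illustrative sketch): unpacking CpuFid/CpuDid/CpuVid from the low
+ * 32 bits of a P-state MSR (MSR C001_0064 + n) with the masks and shifts
+ * defined in this file, and the core COF arithmetic implied by the FID
+ * comments above: COF = 100MHz * (CpuFid + 16) / 2^CpuDid. 'lo' is a
+ * hypothetical variable holding the MSR's low dword.
+ *
+ * u8 cpuFid = lo & BIT_MASK_6; // bits [5:0]
+ * u8 cpuDid = (lo >> PS_CPUDID_SHIFT) & BIT_MASK_3; // bits [8:6]
+ * u8 cpuVid = (lo >> PS_CPU_VID_SHFT) & BIT_MASK_7; // bits [15:9]
+ * u32 coreCofMHz = ((u32)(cpuFid + 0x10) * 100) >> cpuDid;
+ */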
+
+
+
+#define PCI_DEV_BASE 24 /* System PCI device ID base */
+#define LOCAL_APIC_ID_SHIFT 24 /* Local APCI ID shift bit # */
+#define APIC_CID_SIZE_SHIFT 12 /* ApicCoreIdSize shift bit # */
+#define FN_0 0 /* Function 0 */
+#define FN_1 1 /* Function 1 */
+#define FN_2 2 /* Function 2 */
+#define FN_3 3 /* Function 3 */
+#define FN_4 4 /* Function 4 */
+#define FN_5 5 /* Function 5 */
+#define FN_80000000 0x80000000 /* Function 8000_0000 */
+#define FN_80000001 0x80000001 /* Function 8000_0001 */
+#define FN_80000008 0x80000008 /* Function 8000_0008 */
+
+#define LNK_INIT_REG 0x6C /* F0x6C link initialization control register */
+#define WARM_RESET_BIT 0x10 /* bit 4 =1 : warm reset */
+
+#define HTC_REG 0x64 /* hardware thermal control reg */
+#define HTC_PS_LMT_MASK 0x8fffffff /* HtcPstateLimit mask off */
+#define PS_LIMIT_POS 28 /* PstateLimit position for HTC & STC */
+
+#define STC_REG 0x68 /* software thermal control reg */
+#define STC_PS_LMT_MASK 0x8fffffff /* StcPstateLimit mask off */
+
+#define CPTC0 0x0d4 /* Clock Power/Timing Control0 Register*/
+#define CPTC0_MASK 0x000c0fff /* Reset mask for this register */
+#define CPTC0_NBFID_MASK 0xffffffe0 /* NbFid mask off for this register */
+#define CPTC0_NBFID_MON 0x1f /* NbFid mask on for this register */
+#define NB_FID_EN 0x20 /* NbFidEn bit ON */
+#define NB_CLKDID_ALL 0x80000000 /* NbClkDidApplyAll bit ON */
+#define NB_CLKDID 0x40000000 /* NbClkDid value set by BIOS */
+#define PW_STP_UP50 0x08000000 /* PowerStepUp 50nS(1000b) */
+#define PW_STP_DN50 0x00800000 /* PowerStepDown 50nS (1000b)*/
+#define PW_STP_UP100 0x03000000 /* PowerStepUp 100nS(0011b) */
+#define PW_STP_DN100 0x00300000 /* PowerStepDown 100nS (0011b)*/
+#define PW_STP_UP200 0x02000000 /* PowerStepUp 200nS(0010b) */
+#define PW_STP_DN200 0x00200000 /* PowerStepDown 200nS (0010b)*/
+#define PW_STP_UP400 0x00000000 /* PowerStepUp 400nS(0000b) */
+#define PW_STP_DN400 0x00000000 /* PowerStepDown 400nS (0000b)*/
+
+
+#define LNK_PLL_LOCK 0x00010000 /* LnkPllLock value set (01b) by BIOS */
+
+
+
+#define PSTATE_CTL 0xC0010070 /* P-state Control Register */
+#define NB_VID_POS 25 /* NbVid bit shift for position */
+#define NB_VID_MASK_OFF 0x01ffffff /* NbVid bits mask off */
+#define NB_VID_MASK_ON 0xfe000000 /* NbVid bits mask on */
+#define CPU_VID_POS 0x9 /* CpuVid bit shift for position */
+#define CPU_VID_MASK_OFF 0xffff01ff /* CpuVid bits mask off */
+#define CPU_VID_MASK_ON 0x0000fe00 /* CpuVid bits mask on */
+#define CPU_FID_DID_M_ON 0x000001ff /* CpuFid & CpuDid mask on */
+#define CPU_FID_DID_M_OFF 0xfffffe00 /* CpuFid & CpuDid mask off */
+#define NB_DID_VID_M_ON 0xfe400000 /* NbDid & NbVid mask on */
+#define NB_DID_M_ON 0x00400000 /* NbDid mask on */
+#define NB_DID_M_OFF 0xffbfffff /* NbDid mask off */
+#define NB_DID_POS 22 /* NbDid bit shift for position */
+#define PS_M_OFF 0xfff8ffff /* Cur Pstate mask off */
+#define PS_1 0x00010000 /* P-state 1 */
+#define PS_2 0x00020000 /* P-state 2 */
+#define PS_CPU_DID_1 0x40 /* Cpu Did 1 */
+
+
+
+
+#define PSTATE_STS 0xC0010071 /* P-state Status Register */
+#define STARTUP_PS_MASK 0x7 /* StartupPstate Mask */
+
+/* define for NB VID & CPU VID transition functions */
+#define IS_NB 1
+#define IS_CPU 0
+
+/* F3xD8 Clock Power/Timing Control 1 Register */
+#define CPTC1 0xd8 /* Clock Power/Timing Control1 Register*/
+#define VSRAMP_SLAM_MASK 0xffffff88 /* MaskOff [VSRampTime]&[VSSlamTime] */
+#define VSRAMP_SLAM_VALUE 0x16 /* [VSRampTime]=001b&[VSSlamTime]=110b */
+#define VS_RAMP_T 4 /* VSRampTime bit position */
+#define PWR_PLN_SHIFT 28 /* PwrPlanes bit shift */
+#define PWR_PLN_ON 0x10000000 /* PwrPlanes bit ON */
+#define PWR_PLN_OFF 0x0efffffff /* PwrPlanes bit OFF */
+
+
+
+/* Northbridge Capability Register */
+#define NB_CAP 0xe8 /* Northbridge Cap Reg */
+#define CMP_CAP_SHFT 12 /* CMP CAP - number of enabled cores */
+
+/* F3xDC Clock Power/Timing Control 2 Register */
+#define CPTC2 0xdc /* Clock Power/Timing Control2 Register*/
+#define PS_MAX_VAL_POS 8 /* PstateMaxValue bit shift */
+#define PS_MAX_VAL_MASK 0xfffff8ff /* PstateMaxValue Mask off */
+
+#define PRCT_INFO 0x1fc /* Product Info Register */
+#define UNI_NB_FID_BIT 2 /* UniNbFid bit position */
+#define UNI_NB_VID_BIT 7 /* UniNbVid bit position */
+#define SPLT_NB_FID_OFFSET 14 /* SpltNbFidOffset value bit position */
+#define SPLT_NB_VID_OFFSET 17 /* SpltNbVidOffset value bit position */
+#define NB_CV_UPDATE 0x01 /* F3x1FC[NbCofVidUpdated] bit mask */
+#define NB_VID_UPDATE_ALL 0x02 /* F3x1FC[NbVidUpdatedAll] bit mask */
+#define C_FID_DID_M_OFF 0xfffffe00 /* mask off Core FID & DID */
+
+#define PW_CTL_MISC 0x0a0 /* Power Control Miscellaneous Register */
+#define COF_VID_PROG_BIT 0x80000000 /* CofVidProg bit. 0= unfused part */
+#define DUAL_VDD_BIT 0x40000000 /* DualVdd bit. */
+#define NB_COFVID_UPDATE_BIT 0x01 /* NbCOFVIDUpdated bit */
+#define PVI_MODE 0x100 /* PviMode bit mask */
+#define VID_SLAM_OFF 0x0dfffffff /* set VidSlamMode OFF */
+#define VID_SLAM_ON 0x020000000 /* set VidSlamMode ON */
+#define PLLLOCK_OFF 0x0ffffc7ff /* PllLockTime Mask OFF */
+#define PLLLOCK_DFT 0x00001800 /* PllLockTime default value = 011b */
+#define PLLLOCK_DFT_L 0x00002800 /* PllLockTime long value = 101b */
+
+/* P-state Specification register base in PCI space */
+#define PS_SPEC_REG 0x1e0 /* PS Spec register base address */
+#define PCI_REG_LEN 4 /* PCI register length */
+#define NB_DID_MASK 0x10000 /* NbDid bit mask */
+#define NB_DID_2 2 /* NbDid = 2 */
+#define NB_DID_1 1 /* NbDid = 1 */
+#define SPEC_PWRDIV_M_ON 0x06000000 /* PwrDiv mask on */
+#define SPEC_PWRVAL_M_ON 0x01e00000 /* PwrValue mask on */
+#define SPEC_PWRDIV_SHFT 25 /* PwrDiv shift */
+#define SPEC_PWRVAL_SHFT 17 /* PwrValue shift */
+
+/* F4x1F4 Northbridge P-state spec register */
+#define NB_PS_SPEC_REG 0x1f4 /* Nb PS spec reg */
+
+#define NM_PS_REG 5 /* number of P-state MSR registers */
+
+/* sFidVidInit.outFlags defines */
+#define PWR_CK_OK 0 /* System board check OK */
+#define PWR_CK_NO_PS 1 /* All P-state registers are over
+ the limit */
+
+/* bit mask */
+#define BIT_MASK_1 0x1
+#define BIT_MASK_2 0x3
+#define BIT_MASK_3 0x7
+#define BIT_MASK_4 0x0f
+#define BIT_MASK_5 0x1f
+#define BIT_MASK_6 0x3f
+#define BIT_MASK_7 0x7f
+#define BIT_MASK_8 0x0ff
+
+/* VID Code */
+#define VID_1_100V 0x12 /* 1.100V */
+#define VID_1_175V 0x1E /* 1.175V */
+
+
+/* Nb Fid Code */
+#define NB_FID_800M 0x00 /* 800MHz */
+
+/* Nb DID Code */
+#define NB_DID_0 0
+#define NB_DID_1 1
+
+/* GH Logical ID */
+
+#define GH_REV_A2 0x4 /* GH Rev A2 logical ID, Upper half */
+
+
+#endif
diff --git a/src/northbridge/amd/amdht/AsPsNb.c b/src/northbridge/amd/amdht/AsPsNb.c
new file mode 100644
index 0000000000..8f79cd107f
--- /dev/null
+++ b/src/northbridge/amd/amdht/AsPsNb.c
@@ -0,0 +1,145 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*
+ *----------------------------------------------------------------------------
+ * MODULES USED
+ *
+ *----------------------------------------------------------------------------
+ */
+#undef FILECODE
+#define FILECODE 0xCCCC
+#include "comlib.h"
+#include "AsPsDefs.h"
+#include "AsPsNb.h"
+
+
+/*----------------------------------------------------------------------------
+ * PROTOTYPES OF LOCAL FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+u8 getNumOfNodeNb(void);
+u8 translateNodeIdToDeviceIdNb(u8 nodeId);
+
+
+/*----------------------------------------------------------------------------
+ * FUNCTION: getMinNbCOF
+ * INPUT: None
+ * OUTPUT: minNbCOF (in multiple of half of CLKIN, 100MHz)
+ * DESCRIPTION:
+ * This function returns the minimum possible NbCOF (in 100MHz)
+ * for the system.
+ * This function can be run on any core and is used by the HT & Memory init code
+ * in Phase 1.
+ * ----------------------------------------------------------------------------
+ */
+u8 getMinNbCOF(void)
+{
+ u8 numOfNode, i, j, deviceId, nbDid, nbFid, nextNbFid;
+ u32 dtemp;
+
+ nbDid = 0;
+ nbFid = 0;
+
+ /* get number of node in the system */
+ numOfNode = getNumOfNodeNb();
+
+ /* go through each node for the minimum NbCOF (in multiple of CLKIN/2) */
+ for(i=0; i < numOfNode; i++)
+ {
+ /* stub function for APIC ID virtualization for large MP system later */
+ deviceId = translateNodeIdToDeviceIdNb(i);
+
+ /* read all P-state spec registers for NbDid=1 */
+ for(j=0; j < 5; j++)
+ {
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_4,PS_SPEC_REG+(j*PCI_REG_LEN)), &dtemp); /*F4x1E0 + j*4 */
+ /* get NbDid */
+ if(dtemp & NB_DID_MASK)
+ nbDid = 1;
+ }
+ /* if F3x1FC[NbCofVidUpdate]=0, NbFid = default value */
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/
+ if(!(dtemp & NB_CV_UPDATE)) /* F3x1FC[NbCofVidUpdated]=0, use default VID */
+ {
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,CPTC0), &dtemp); /*F3xD4*/
+ nextNbFid = (u8) (dtemp & BIT_MASK_5);
+ if(nbDid)
+ nextNbFid = (u8) (nextNbFid >> 1);
+ }
+ else
+ {
+ /* check PVI/SVI */
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PW_CTL_MISC), &dtemp); /*F3xA0*/
+ if(dtemp & PVI_MODE) /* PVI */
+ {
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/
+ nextNbFid = (u8) (dtemp >> UNI_NB_FID_BIT);
+ nextNbFid &= BIT_MASK_5;
+ /* if(nbDid)
+ nextNbFid = nextNbFid >> 1; */
+ }
+ else /* SVI */
+ {
+ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/
+ nextNbFid = (u8) ((dtemp >> UNI_NB_FID_BIT) & BIT_MASK_5);
+ nextNbFid = (u8) (nextNbFid + ((dtemp >> SPLT_NB_FID_OFFSET) & BIT_MASK_3));
+ /* if(nbDid)
+ nextNbFid = nextNbFid >> 1; */
+ }
+ }
+ if( i == 0)
+ nbFid = nextNbFid;
+ else if( nbFid > nextNbFid )
+ nbFid = nextNbFid;
+ }
+
+ /* add the base and convert to 100MHz divide by 2 if DID=1 */
+ if(nbDid)
+ nbFid = (u8) (nbFid + 4);
+ else
+ nbFid = (u8) ((nbFid + 4) << 1);
+ return nbFid;
+}
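+
+/*
+ * Usage note (illustrative sketch): the value computed above is in 100MHz
+ * units, i.e. it evaluates NbCOF = 200MHz * (NbFid + 4) / 2^NbDid, so the
+ * minimum northbridge clock in MHz is simply:
+ *
+ * u32 minNbClockMHz = getMinNbCOF() * 100;
+ */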
+
+u8 getNumOfNodeNb(void)
+{
+ u32 dtemp;
+
+ AmdPCIRead(MAKE_SBDFO(0,0,24,0,0x60), &dtemp);
+ dtemp = (dtemp >> 4) & BIT_MASK_3;
+ dtemp++;
+ return (u8)dtemp;
+}
+
+/*----------------------------------------------------------------------------
+ * FUNCTION: translateNodeIdToDeviceId
+ * INPUT: u8 nodeId - node ID of the node
+ * OUTPUT: u8 - PCI device ID of the node
+ * DESCRIPTION:
+ * This function returns the PCI device ID for PCI access using the node ID.
+ * It may need to change node ID to device ID in large MP systems.
+ * ----------------------------------------------------------------------------
+ */
+u8 translateNodeIdToDeviceIdNb(u8 nodeId)
+{
+ return (u8) (nodeId+PCI_DEV_BASE);
+}
diff --git a/src/northbridge/amd/amdht/AsPsNb.h b/src/northbridge/amd/amdht/AsPsNb.h
new file mode 100644
index 0000000000..c7ac180b3e
--- /dev/null
+++ b/src/northbridge/amd/amdht/AsPsNb.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef ASPSNB_H
+#define ASPSNB_H
+
+u8 getMinNbCOF(void);
+
+#endif
diff --git a/src/northbridge/amd/amdht/comlib.c b/src/northbridge/amd/amdht/comlib.c
new file mode 100644
index 0000000000..47c538b19e
--- /dev/null
+++ b/src/northbridge/amd/amdht/comlib.c
@@ -0,0 +1,290 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef FILECODE
+#define FILECODE 0xCCCC
+#include "comlib.h"
+
+/*
+ *---------------------------------------------------------------------------
+ * EXPORTED FUNCTIONS
+ *
+ *---------------------------------------------------------------------------
+ */
+
+void CALLCONV AmdPCIReadBits(SBDFO loc, u8 highbit, u8 lowbit, u32 *pValue)
+{
+ ASSERT(highbit < 32 && lowbit < 32 && highbit >= lowbit && (loc & 3) == 0);
+
+ AmdPCIRead(loc, pValue);
+ *pValue = *pValue >> lowbit; /* Shift */
+
+ /* A 1<<32 == 1<<0 due to x86 SHL instruction, so skip if that is the case */
+ if ((highbit-lowbit) != 31)
+ *pValue &= (((u32)1 << (highbit-lowbit+1))-1);
+}
+
+
+void CALLCONV AmdPCIWriteBits(SBDFO loc, u8 highbit, u8 lowbit, u32 *pValue)
+{
+ u32 temp, mask;
+
+ ASSERT(highbit < 32 && lowbit < 32 && highbit >= lowbit && (loc & 3) == 0);
+
+ /* A 1<<32 == 1<<0 due to x86 SHL instruction, so skip if that is the case */
+ if ((highbit-lowbit) != 31)
+ mask = (((u32)1 << (highbit-lowbit+1))-1);
+ else
+ mask = (u32)0xFFFFFFFF;
+
+ AmdPCIRead(loc, &temp);
+ temp &= ~(mask << lowbit);
+ temp |= (*pValue & mask) << lowbit;
+ AmdPCIWrite(loc, &temp);
+}
+
+
+/*
+ * Given an SBDFO, this routine will find the next PCI capabilities list entry.
+ * If the end of the list is reached, or if a problem is detected, then
+ * ILLEGAL_SBDFO is returned.
+ *
+ * To start a new search from the head of the list, specify an SBDFO with an
+ * offset of zero.
+ */
+void CALLCONV AmdPCIFindNextCap(SBDFO *pCurrent)
+{
+ SBDFO base;
+ u32 offset;
+ u32 temp;
+
+ if (*pCurrent == ILLEGAL_SBDFO)
+ return;
+
+ offset = SBDFO_OFF(*pCurrent);
+ base = *pCurrent - offset;
+ *pCurrent = ILLEGAL_SBDFO;
+
+ /* Verify that the SBDFO points to a valid PCI device (sanity check) */
+ AmdPCIRead(base, &temp);
+ if (temp == 0xFFFFFFFF)
+ return; /* There is no device at this address */
+
+ /* Verify that the device supports a capability list */
+ AmdPCIReadBits(base + 0x04, 20, 20, &temp);
+ if (temp == 0)
+ return; /* This PCI device does not support capability lists */
+
+ if (offset != 0)
+ {
+ /* If we are continuing on an existing list */
+ AmdPCIReadBits(base + offset, 15, 8, &temp);
+ }
+ else
+ {
+ /* We are starting on a new list */
+ AmdPCIReadBits(base + 0x34, 7, 0, &temp);
+ }
+
+ if (temp == 0)
+ return; /* We have reached the end of the capabilities list */
+
+ /* Error detection and recovery: the statement below protects against
+ PCI devices with broken PCI capabilities lists. Detect a pointer
+ that is not u32 aligned, that points into the first 64 reserved DWORDs,
+ or that points back to itself.
+ */
+ if (((temp & 3) != 0) || (temp == offset) || (temp < 0x40))
+ return;
+
+ *pCurrent = base + temp;
+ return;
+}
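+
+/*
+ * Usage sketch (illustrative): walking the capability list of the node 0
+ * host bridge (bus 0, device 24, function 0). Starting with an offset of 0
+ * begins a new search; the loop ends when ILLEGAL_SBDFO comes back.
+ *
+ * SBDFO cap = MAKE_SBDFO(0, 0, 24, 0, 0);
+ * for (AmdPCIFindNextCap(&cap); cap != ILLEGAL_SBDFO; AmdPCIFindNextCap(&cap))
+ * {
+ * u32 capId;
+ * AmdPCIReadBits(cap, 7, 0, &capId); // capability ID byte
+ * }
+ */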
+
+
+void CALLCONV Amdmemcpy(void *pDst, const void *pSrc, u32 length)
+{
+ ASSERT(length <= 32768);
+ ASSERT(pDst != NULL);
+ ASSERT(pSrc != NULL);
+
+ while (length--){
+ // *(((u8*)pDst)++) = *(((u8*)pSrc)++);
+ *((u8*)pDst) = *((u8*)pSrc);
+ pDst++;
+ pSrc++;
+ }
+}
+
+
+void CALLCONV Amdmemset(void *pBuf, u8 val, u32 length)
+{
+ ASSERT(length <= 32768);
+ ASSERT(pBuf != NULL);
+
+ while (length--){
+ //*(((u8*)pBuf)++) = val;
+ *(((u8*)pBuf)) = val;
+ pBuf++;
+ }
+}
+
+
+u8 CALLCONV AmdBitScanReverse(u32 value)
+{
+ u8 i;
+
+ for (i = 31; i != 0xFF; i--)
+ {
+ if (value & ((u32)1 << i))
+ break;
+ }
+
+ return i;
+}
+
+
+u32 CALLCONV AmdRotateRight(u32 value, u8 size, u32 count)
+{
+ u32 msb, mask;
+ ASSERT(size > 0 && size <= 32);
+
+ msb = (u32)1 << (size-1);
+ mask = ((msb-1) << 1) + 1;
+
+ value = value & mask;
+
+ while (count--)
+ {
+ if (value & 1)
+ value = (value >> 1) | msb;
+ else
+ value = value >> 1;
+ }
+
+ return value;
+}
+
+
+u32 CALLCONV AmdRotateLeft(u32 value, u8 size, u32 count)
+{
+ u32 msb, mask;
+ ASSERT(size > 0 && size <= 32);
+
+ msb = (u32)1 << (size-1);
+ mask = ((msb-1) << 1) + 1;
+
+ value = value & mask;
+
+ while (count--)
+ {
+ if (value & msb)
+ value = ((value << 1) & mask) | (u32)1;
+ else
+ value = ((value << 1) & mask);
+ }
+
+ return value;
+}
+
+
+void CALLCONV AmdPCIRead(SBDFO loc, u32 *Value)
+{
+ /* Use LinuxBIOS PCI functions */
+ *Value = pci_read_config32((loc & 0xFFFFF000), SBDFO_OFF(loc));
+}
+
+
+void CALLCONV AmdPCIWrite(SBDFO loc, u32 *Value)
+{
+ /* Use LinuxBIOS PCI functions */
+ pci_write_config32((loc & 0xFFFFF000), SBDFO_OFF(loc), *Value);
+}
+
+
+void CALLCONV AmdMSRRead(uint32 Address, uint64 *Value)
+{
+ msr_t msr;
+
+ msr = rdmsr(Address);
+ Value->lo = msr.lo;
+ Value->hi = msr.hi;
+}
+
+
+void CALLCONV AmdMSRWrite(uint32 Address, uint64 *Value)
+{
+ msr_t msr;
+
+ msr.lo = Value->lo;
+ msr.hi = Value->hi;
+ wrmsr(Address, msr);
+}
+
+
+void ErrorStop(u32 value)
+{
+ printk_debug("Error: %08x ", value);
+
+}
+
+/*;----------------------------------------------------------------------------
+; void __pascal ErrorStop(DWORD Value);
+;
+; This implementation provides a rotating display of the error code on a
+; port 80h POST display card. The rotation is used to make it easier to
+; view the error on both a 16-bit as well as a 32-bit display card.
+;
+; For use with SimNow the unrotated error code is also written to port 84h
+ErrorStop PROC FAR PASCAL PUBLIC Value:DWORD
+ pushad
+ mov eax, Value
+ mov bx, 0DEADh
+ out 84h, eax
+
+ErrorStopTop:
+ out 80h, eax
+
+ mov cx, 4 ; Rotate the display by one nibble
+@@:
+ bt bx, 15
+ rcl eax, 1
+ rcl bx, 1
+ loop @B
+
+
+ push eax ; Delay a few hundred milliseconds
+ push ebx
+ mov ecx, 10h ; TSC
+ db 00Fh, 032h ; RDMSR
+ mov ebx, eax
+@@:
+ db 00Fh, 032h ; RDMSR
+ sub eax, ebx
+ cmp eax, 500000000
+ jb @B
+ pop ebx
+ pop eax
+
+ jmp ErrorStopTop
+
+ popad
+ ret
+ErrorStop ENDP
+*/
diff --git a/src/northbridge/amd/amdht/comlib.h b/src/northbridge/amd/amdht/comlib.h
new file mode 100644
index 0000000000..08c6d111f1
--- /dev/null
+++ b/src/northbridge/amd/amdht/comlib.h
@@ -0,0 +1,59 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef COMLIB_H
+#define COMLIB_H
+
+#ifndef FILECODE
+#error "FILECODE was not defined, should be #define'd to 0xFxxx"
+#endif
+
+#include "porting.h"
+
+/* include LinuxBIOS pci functions */
+#include <device/pci_def.h>
+#include <device/pci_ids.h>
+
+#ifdef AMD_DEBUG
+ #define ASSERT(x) ((x) ? 0 : ErrorStop(((uint32)FILECODE)*0x10000 + ((__LINE__)%10) + (((__LINE__/10)%10)*0x10) + (((__LINE__/100)%10)*0x100) +(((__LINE__/1000)%10)*0x1000)))
+#else
+ #define ASSERT(x)
+#endif
+
+#ifdef AMD_DEBUG_ERROR_STOP
+ /* Macro to aid debugging, causes program to halt and display the line number of the halt in decimal */
+ #define STOP_HERE ErrorStop(((uint32)FILECODE)*0x10000 + ((__LINE__)%10) + (((__LINE__/10)%10)*0x10) + (((__LINE__/100)%10)*0x100) +(((__LINE__/1000)%10)*0x1000))
+#else
+ /* In non-debug builds STOP_HERE expands to nothing */
+ /* #define STOP_HERE STOP_HERE_OnlyForDebugUse */
+ #define STOP_HERE
+#endif
+
+void CALLCONV AmdPCIReadBits(SBDFO loc, uint8 highbit, uint8 lowbit, uint32 *value);
+void CALLCONV AmdPCIWriteBits(SBDFO loc, uint8 highbit, uint8 lowbit, uint32 *value);
+void CALLCONV AmdPCIFindNextCap(SBDFO *current);
+
+void CALLCONV Amdmemcpy(void *dst, const void *src, uint32 length);
+void CALLCONV Amdmemset(void *buf, uint8 val, uint32 length);
+
+uint8 CALLCONV AmdBitScanReverse(uint32 value);
+uint32 CALLCONV AmdRotateRight(uint32 value, uint8 size, uint32 count);
+uint32 CALLCONV AmdRotateLeft(uint32 value, uint8 size, uint32 count);
+
+#endif
diff --git a/src/northbridge/amd/amdht/h3ffeat.h b/src/northbridge/amd/amdht/h3ffeat.h
new file mode 100644
index 0000000000..d415fee108
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3ffeat.h
@@ -0,0 +1,177 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef H3FFEAT_H
+#define H3FFEAT_H
+
+/*----------------------------------------------------------------------------
+ * Mixed (DEFINITIONS AND MACROS / TYPEDEFS, STRUCTURES, ENUMS)
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*-----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#define MAX_NODES 8
+#define MAX_LINKS 8
+#define MAX_PLATFORM_LINKS 64 /* 8x8 fully connected (28) + 4 chains with two HT devices */
+
+/* These following are internal definitions */
+#define ROUTETOSELF 0x0F
+#define INVALID_LINK 0xCC /* Used in port list data structure to mark unused data entries.
+ Can also be used to indicate that no link was found in a port list search */
+
+/* definitions for working with the port list structure */
+#define PORTLIST_TYPE_CPU 0
+#define PORTLIST_TYPE_IO 1
+
+/*
+ * Hypertransport Capability definitions and macros
+ *
+ */
+
+/* HT Host Capability */
+/* bool isHTHostCapability(u32 reg) */
+#define IS_HT_HOST_CAPABILITY(reg) \
+ ((reg & (u32)0xE00000FF) == (u32)0x20000008)
+
+#define HT_HOST_CAP_SIZE 0x20
+
+/* Host CapabilityRegisters */
+#define HTHOST_LINK_CAPABILITY_REG 0x00
+#define HTHOST_LINK_CONTROL_REG 0x04
+#define HTHOST_FREQ_REV_REG 0x08
+ #define HT_HOST_REV_REV3 0x60
+#define HTHOST_FEATURE_CAP_REG 0x0C
+#define HTHOST_BUFFER_COUNT_REG 0x10
+#define HTHOST_ISOC_REG 0x14
+#define HTHOST_LINK_TYPE_REG 0x18
+ #define HTHOST_TYPE_COHERENT 3
+ #define HTHOST_TYPE_NONCOHERENT 7
+ #define HTHOST_TYPE_MASK 0x1F
+
+/* HT Slave Capability (HT1 compat) */
+#define IS_HT_SLAVE_CAPABILITY(reg) \
+ ((reg & (u32)0xE00000FF) == (u32)0x00000008)
+#define HTSLAVE_LINK01_OFFSET 4
+#define HTSLAVE_LINK_CONTROL_0_REG 4
+#define HTSLAVE_FREQ_REV_0_REG 0xC
+
+/* HT3 gen Capability */
+#define IS_HT_GEN3_CAPABILITY(reg) \
+ ((reg & (u32)0xF80000FF) == (u32)0xD0000008)
+#define HTGEN3_LINK01_OFFSET 0x10
+#define HTGEN3_LINK_TRAINING_0_REG 0x10
+
+/* HT3 Retry Capability */
+#define IS_HT_RETRY_CAPABILITY(reg) \
+ ((reg & (u32)0xF80000FF) == (u32)0xC0000008)
+
+#define HTRETRY_CONTROL_REG 4
+
+/* Unit ID Clumping Capability */
+#define IS_HT_UNITID_CAPABILITY(reg) \
+ ((reg & (u32)0xF80000FF) == (u32)0x90000008)
+
+#define HTUNIT_SUPPORT_REG 4
+#define HTUNIT_ENABLE_REG 8
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS, STRUCTURES, ENUMS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+typedef struct cNorthBridge cNorthBridge;
+
+/* A pair consists of a source node, a link to the destination node, the
+ * destination node, and its link back to source node. The even indices are
+ * the source nodes and links, and the odd indices are for the destination
+ * nodes and links.
+ */
+typedef struct
+{
+ /* This section is where the link is in the system and how to find it */
+ u8 Type; /* 0 = CPU, 1 = Device, all others reserved */
+ u8 Link; /* 0-1 for devices, 0-7 for CPUs */
+ u8 NodeID; /* The node, or a pointer to the device's parent node */
+ u8 HostLink, HostDepth; /* Link of parent node + depth in chain. Only used by devices */
+ SBDFO Pointer; /* A pointer to the device's slave HT capability, so we don't have to keep searching */
+
+ /* This section is for the final settings, which are written to hardware */
+ BOOL SelRegang; /* Only used for CPU->CPU links */
+ u8 SelWidthIn;
+ u8 SelWidthOut;
+ u8 SelFrequency;
+
+ /* This section is for keeping track of capabilities and possible configurations */
+ BOOL RegangCap;
+ u16 PrvFrequencyCap;
+ u8 PrvWidthInCap;
+ u8 PrvWidthOutCap;
+ u16 CompositeFrequencyCap;
+
+} sPortDescriptor;
+
+
+/*
+ * Our global state data structure
+ */
+typedef struct {
+ AMD_HTBLOCK *HtBlock;
+
+ u8 NodesDiscovered; /* One less than the number of nodes found in the system */
+ u8 TotalLinks;
+ u8 sysMpCap; /* The maximum number of nodes that all processors are capable of */
+
+ /* Two ports for each link
+ * Note: The Port pair 2*N and 2*N+1 are connected together to form a link
+ * (e.g. 0,1 and 8,9 are ports on either end of an HT link). The lower-numbered
+ * port (2*N) is the source port. The device that owns the source port is
+ * always the device closer to the BSP (i.e. nearer the CPU in a
+ * non-coherent chain, or the CPU with the lower NodeID).
+ */
+ sPortDescriptor PortList[MAX_PLATFORM_LINKS*2];
+
+ /* The number of coherent links coming off of each node (i.e. the 'Degree' of the node) */
+ u8 sysDegree[MAX_NODES];
+ /* The system's adjacency matrix (sysMatrix[i][j] is true if Node_i has a link to Node_j) */
+ BOOL sysMatrix[MAX_NODES][MAX_NODES];
+
+ /* Same as above, but for the currently selected database entry */
+ u8 dbDegree[MAX_NODES];
+ BOOL dbMatrix[MAX_NODES][MAX_NODES];
+
+ u8 Perm[MAX_NODES]; /* The node mapping from the database to the system */
+ u8 ReversePerm[MAX_NODES]; /* The node mapping from the system to the database */
+
+ /* Data for non-coherent initialization */
+ u8 AutoBusCurrent;
+ u8 UsedCfgMapEntires;
+
+ /* 'This' pointer for northbridge */
+ cNorthBridge *nb;
+} sMainData;
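+
+/*
+ * Usage sketch (illustrative): the port list is consumed two entries at a
+ * time, since ports 2*N and 2*N+1 describe the two ends of one link. 'pDat'
+ * is a hypothetical pointer to an initialized sMainData.
+ *
+ * u8 i;
+ * for (i = 0; i < pDat->TotalLinks * 2; i += 2)
+ * {
+ * sPortDescriptor *src = &pDat->PortList[i]; // end closer to the BSP
+ * sPortDescriptor *dst = &pDat->PortList[i + 1];
+ * if ((src->Type == PORTLIST_TYPE_CPU) && (dst->Type == PORTLIST_TYPE_CPU))
+ * {
+ * // coherent CPU<->CPU link
+ * }
+ * }
+ */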
+
+#endif /* H3FFEAT_H */
diff --git a/src/northbridge/amd/amdht/h3finit.c b/src/northbridge/amd/amdht/h3finit.c
new file mode 100644
index 0000000000..dd3e3813bd
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3finit.c
@@ -0,0 +1,1678 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ *----------------------------------------------------------------------------
+ * MODULES USED
+ *
+ *----------------------------------------------------------------------------
+ */
+
+#undef FILECODE
+#define FILECODE 0xF001
+
+#include "comlib.h"
+#include "h3finit.h"
+#include "h3ffeat.h"
+#include "h3ncmn.h"
+#include "h3gtopo.h"
+#include "AsPsNb.h"
+/* this is pre-ram so include the required C files here */
+#include "comlib.c"
+#include "AsPsNb.c"
+#include "h3ncmn.c"
+
+/*----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+#undef FILECODE
+#define FILECODE 0xF001
+
+/* APIC defines from amdgesa.inc, which can't be included into C code. */
+#define APIC_Base_BSP 8
+#define APIC_Base 0x1b
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS AND STRUCTURES
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------
+ * PROTOTYPES OF LOCAL FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------
+ * EXPORTED FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------
+ * LOCAL FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+#ifndef HT_BUILD_NC_ONLY
+/*
+ **************************************************************************
+ * Routing table decompressor
+ **************************************************************************
+ */
+
+/*
+ **************************************************************************
+ * Graph Support routines
+ * These routines provide support for dealing with the graph representation
+ * of the topologies, along with the routing table information for that topology.
+ * The routing information is compressed and these routines currently decompress
+ * 'on the fly'. A graph is represented as a set of routes. All the edges in the
+ * graph are routes; a direct route from node i to node j exists in the graph IFF
+ * there is an edge directly connecting node i to node j. All other routes designate
+ * the edge which the route to that node initially takes, by designating a node
+ * to which a direct connection exists. That is, the route to non-adjacent node j
+ * from node i specifies node k where node i directly connects to node k.
+ *
+ *
+ * pseudo definition of compressed graph:
+ * typedef struct
+ * {
+ * BIT broadcast[8];
+ * uint4 responseRoute;
+ * uint4 requestRoute;
+ * } sRoute;
+ * typedef struct
+ * {
+ * u8 size;
+ * sRoute graph[size][size];
+ * } sGraph;
+ *
+ **************************************************************************
+ */
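+
+/*
+ * Layout sketch (derived from the accessors below): for a graph of 'size'
+ * nodes, the two bytes describing the ordered pair (i, j) live at:
+ *
+ * u8 bc = graph[1 + (i * size + j) * 2]; // broadcast vector
+ * u8 routes = graph[1 + (i * size + j) * 2 + 1]; // response in [7:4], request in [3:0]
+ */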
+
+/*----------------------------------------------------------------------------------------
+ * int
+ * graphHowManyNodes(u8 *graph)
+ *
+ * Description:
+ * Returns the number of nodes in the compressed graph
+ *
+ * Parameters:
+ * @param[in] u8 graph = a compressed graph
+ * @param[out] u8 results = the number of nodes in the graph
+ * ---------------------------------------------------------------------------------------
+ */
+int graphHowManyNodes(u8 *graph)
+{
+ return graph[0];
+}
+
+/*----------------------------------------------------------------------------------------
+ * BOOL
+ * graphIsAdjacent(u8 *graph, u8 nodeA, u8 nodeB)
+ *
+ * Description:
+ * Returns true if NodeA is directly connected to NodeB, false otherwise
+ * (if NodeA == NodeB also returns false)
+ * Relies on rule that directly connected nodes always route requests directly.
+ *
+ * Parameters:
+ * @param[in] u8 graph = the graph to examine
+ * @param[in] u8 nodeA = the node number of the first node
+ * @param[in] u8 nodeB = the node number of the second node
+ * @param[out] BOOL results = true if nodeA connects to nodeB false if not
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL graphIsAdjacent(u8 *graph, u8 nodeA, u8 nodeB)
+{
+ u8 size = graph[0];
+ ASSERT(size <= MAX_NODES);
+ ASSERT((nodeA < size) && (nodeB < size));
+ return (graph[1+(nodeA*size+nodeB)*2+1] & 0x0F) == nodeB;
+}
+
+/*----------------------------------------------------------------------------------------
+ * u8
+ * graphGetRsp(u8 *graph, u8 nodeA, u8 nodeB)
+ *
+ * Description:
+ * Returns the graph node used by nodeA to route responses targeted at nodeB.
+ * This will be a node directly connected to nodeA (possibly nodeB itself),
+ * or "Route to Self" if nodeA and nodeB are the same node.
+ * Note that all node numbers are abstract node numbers of the topology graph,
+ * it is the responsibility of the caller to apply any permutation needed.
+ *
+ * Parameters:
+ * @param[in] u8 graph = the graph to examine
+ * @param[in] u8 nodeA = the node number of the first node
+ * @param[in] u8 nodeB = the node number of the second node
+ * @param[out] u8 results = The response route node
+ * ---------------------------------------------------------------------------------------
+ */
+u8 graphGetRsp(u8 *graph, u8 nodeA, u8 nodeB)
+{
+ u8 size = graph[0];
+ ASSERT(size <= MAX_NODES);
+ ASSERT((nodeA < size) && (nodeB < size));
+ return (graph[1+(nodeA*size+nodeB)*2+1] & 0xF0)>>4;
+}
+
+/*----------------------------------------------------------------------------------------
+ * u8
+ * graphGetReq(u8 *graph, u8 nodeA, u8 nodeB)
+ *
+ * Description:
+ * Returns the graph node used by nodeA to route requests targeted at nodeB.
+ * This will be a node directly connected to nodeA (possibly nodeB itself),
+ * or "Route to Self" if nodeA and nodeB are the same node.
+ * Note that all node numbers are abstract node numbers of the topology graph,
+ * it is the responsibility of the caller to apply any permutation needed.
+ *
+ * Parameters:
+ * @param[in] u8 graph = the graph to examine
+ * @param[in] u8 nodeA = the node number of the first node
+ * @param[in] u8 nodeB = the node number of the second node
+ * @param[out] u8 results = The request route node
+ * ---------------------------------------------------------------------------------------
+ */
+u8 graphGetReq(u8 *graph, u8 nodeA, u8 nodeB)
+{
+ int size = graph[0];
+ ASSERT(size <= MAX_NODES);
+ ASSERT((nodeA < size) && (nodeB < size));
+ return (graph[1+(nodeA*size+nodeB)*2+1] & 0x0F);
+}
+
+/*----------------------------------------------------------------------------------------
+ * u8
+ * graphGetBc(unsigned char *graph, int nodeA, int nodeB)
+ *
+ * Description:
+ * Returns a bit vector of nodes that nodeA should forward a broadcast from
+ * nodeB towards
+ *
+ * Parameters:
+ * @param[in] u8 graph = the graph to examine
+ * @param[in] u8 nodeA = the node number of the first node
+ * @param[in] u8 nodeB = the node number of the second node
+ *	@param[out]	u8 results = the broadcast routes for nodeA from nodeB
+ * ---------------------------------------------------------------------------------------
+ */
+u8 graphGetBc(unsigned char *graph, int nodeA, int nodeB)
+{
+ int size = graph[0];
+ ASSERT(size <= MAX_NODES);
+ ASSERT((nodeA < size) && (nodeB < size));
+ return graph[1+(nodeA*size+nodeB)*2];
+}
+
+
+/***************************************************************************
+ *** GENERIC HYPERTRANSPORT DISCOVERY CODE ***
+ ***************************************************************************/
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * routeFromBSP(u8 targetNode, u8 actualTarget, sMainData *pDat)
+ *
+ * Description:
+ * Ensure a request / response route from target node to bsp. Since target node is
+ * always a predecessor of actual target node, each node gets a route to actual target
+ * on the link that goes to target. The routing produced by this routine is adequate
+ * for config access during discovery, but NOT for coherency.
+ *
+ * Parameters:
+ * @param[in] u8 targetNode = the path to actual target goes through target
+ * @param[in] u8 actualTarget = the ultimate target being routed to
+ * @param[in] sMainData* pDat = our global state, port config info
+ * ---------------------------------------------------------------------------------------
+ */
+void routeFromBSP(u8 targetNode, u8 actualTarget, sMainData *pDat)
+{
+ u8 predecessorNode, predecessorLink, currentPair;
+
+ if (targetNode == 0)
+ return; // BSP has no predecessor, stop
+
+ // Search for the link that connects targetNode to its predecessor
+ currentPair = 0;
+ while (pDat->PortList[currentPair*2+1].NodeID != targetNode)
+ {
+ currentPair++;
+ ASSERT(currentPair < pDat->TotalLinks);
+ }
+
+ predecessorNode = pDat->PortList[currentPair*2].NodeID;
+ predecessorLink = pDat->PortList[currentPair*2].Link;
+
+ // Recursively call self to ensure the route from the BSP to the Predecessor
+ // Node is established
+ routeFromBSP(predecessorNode, actualTarget, pDat);
+
+ pDat->nb->writeRoutingTable(predecessorNode, actualTarget, predecessorLink, pDat->nb);
+}
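+
+/* Worked example (hypothetical): if node 3 was discovered through node 1, which
+ * was discovered through node 0 (the BSP), then routeFromBSP(3, 3, pDat) recurses
+ * back to the BSP and, while unwinding, writes a route for node 3 on node 0 (out
+ * the link toward node 1) and then on node 1 (out the link toward node 3).
+ */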
+
+/*----------------------------------------------------------------------------------------
+ * u8
+ * convertNodeToLink(u8 srcNode, u8 targetNode, sMainData *pDat)
+ *
+ * Description:
+ * Return the link on source node which connects to target node
+ *
+ * Parameters:
+ * @param[in] u8 srcNode = the source node
+ * @param[in] u8 targetNode = the target node to find the link to
+ * @param[in] sMainData* pDat = our global state
+ * @param[out] u8 results = the link on source which connects to target
+ * ---------------------------------------------------------------------------------------
+ */
+u8 convertNodeToLink(u8 srcNode, u8 targetNode, sMainData *pDat)
+{
+ u8 targetlink = INVALID_LINK;
+ u8 k;
+
+ for (k = 0; k < pDat->TotalLinks*2; k += 2)
+ {
+ if ((pDat->PortList[k+0].NodeID == srcNode) && (pDat->PortList[k+1].NodeID == targetNode))
+ {
+ targetlink = pDat->PortList[k+0].Link;
+ break;
+ }
+ else if ((pDat->PortList[k+1].NodeID == srcNode) && (pDat->PortList[k+0].NodeID == targetNode))
+ {
+ targetlink = pDat->PortList[k+1].Link;
+ break;
+ }
+ }
+ ASSERT(targetlink != INVALID_LINK);
+
+ return targetlink;
+}
+
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * htDiscoveryFloodFill(sMainData *pDat)
+ *
+ * Description:
+ * Discover all coherent devices in the system, initializing some basics like node IDs
+ * and total nodes found in the process. As we go we also build a representation of the
+ * discovered system which we will use later to program the routing tables. During this
+ * step, the routing is via default link back to BSP and to each new node on the link it
+ * was discovered on (no coherency is active yet).
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void htDiscoveryFloodFill(sMainData *pDat)
+{
+ u8 currentNode = 0;
+ u8 currentLink;
+
+ /* Entries are always added in pairs, the even indices are the 'source'
+ * side closest to the BSP, the odd indices are the 'destination' side
+ */
+
+ while (currentNode <= pDat->NodesDiscovered)
+ {
+ u32 temp;
+
+ if (currentNode != 0)
+ {
+ /* Set path from BSP to currentNode */
+ routeFromBSP(currentNode, currentNode, pDat);
+
+			/* Set path from BSP through currentNode for currentNode+1,
+			 * if currentNode+1 != MAX_NODES
+			 */
+ if (currentNode+1 != MAX_NODES)
+ routeFromBSP(currentNode, currentNode+1, pDat);
+
+ /* Configure currentNode to route traffic to the BSP through its
+ * default link
+ */
+ pDat->nb->writeRoutingTable(currentNode, 0, pDat->nb->readDefLnk(currentNode, pDat->nb), pDat->nb);
+ }
+
+ /* Set currentNode's NodeID field to currentNode */
+ pDat->nb->writeNodeID(currentNode, currentNode, pDat->nb);
+
+ /* Enable routing tables on currentNode*/
+ pDat->nb->enableRoutingTables(currentNode, pDat->nb);
+
+ for (currentLink = 0; currentLink < pDat->nb->maxLinks; currentLink++)
+ {
+ BOOL linkfound;
+ u8 token;
+
+ if (pDat->HtBlock->AMD_CB_IgnoreLink && pDat->HtBlock->AMD_CB_IgnoreLink(currentNode, currentLink))
+ continue;
+
+ if (pDat->nb->readTrueLinkFailStatus(currentNode, currentLink, pDat, pDat->nb))
+ continue;
+
+ /* Make sure that the link is connected, coherent, and ready */
+ if (!pDat->nb->verifyLinkIsCoherent(currentNode, currentLink, pDat->nb))
+ continue;
+
+
+ /* Test to see if the currentLink has already been explored */
+ linkfound = FALSE;
+ for (temp = 0; temp < pDat->TotalLinks; temp++)
+ {
+ if ((pDat->PortList[temp*2+1].NodeID == currentNode) &&
+ (pDat->PortList[temp*2+1].Link == currentLink))
+ {
+ linkfound = TRUE;
+ break;
+ }
+ }
+ if (linkfound)
+ {
+				/* We have already explored this link */
+ continue;
+ }
+
+ if (pDat->nb->handleSpecialLinkCase(currentNode, currentLink, pDat, pDat->nb))
+ {
+ continue;
+ }
+
+ /* Modify currentNode's routing table to use currentLink to send
+ * traffic to currentNode+1
+ */
+ pDat->nb->writeRoutingTable(currentNode, currentNode+1, currentLink, pDat->nb);
+
+ /* Check the northbridge of the node we just found, to make sure it is compatible
+ * before doing anything else to it.
+ */
+ if (!pDat->nb->isCompatible(currentNode+1, pDat->nb))
+ {
+ u8 nodeToKill;
+
+ /* Notify BIOS of event (while variables are still the same) */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventCohFamilyFeud evt = {sizeof(sHtEventCohFamilyFeud),
+ currentNode,
+ currentLink,
+ pDat->NodesDiscovered};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_COH_FAMILY_FEUD,
+ (u8 *)&evt);
+ }
+
+				/* The node is not compatible: force boot as 1P.
+				 * Stop cHT init and:
+				 *	1. Disable all cHT links on the BSP
+				 *	2. Configure the BSP routing tables as a UP.
+				 *	3. Notify main BIOS.
+				 */
+ pDat->NodesDiscovered = 0;
+ currentNode = 0;
+ pDat->TotalLinks = 0;
+ /* Abandon our coherent link data structure. At this point there may
+ * be coherent links on the BSP that are not yet in the portList, and
+ * we have to turn them off anyway. So depend on the hardware to tell us.
+ */
+ for (currentLink = 0; currentLink < pDat->nb->maxLinks; currentLink++)
+ {
+ /* Stop all links which are connected, coherent, and ready */
+ if (pDat->nb->verifyLinkIsCoherent(currentNode, currentLink, pDat->nb))
+ pDat->nb->stopLink(currentNode, currentLink, pDat->nb);
+ }
+
+ for (nodeToKill = 0; nodeToKill < pDat->nb->maxNodes; nodeToKill++)
+ {
+ pDat->nb->writeFullRoutingTable(0, nodeToKill, ROUTETOSELF, ROUTETOSELF, 0, pDat->nb);
+ }
+
+ /* End Coherent Discovery */
+ STOP_HERE;
+ break;
+ }
+
+ /* Read token from Current+1 */
+ token = pDat->nb->readToken(currentNode+1, pDat->nb);
+ ASSERT(token <= pDat->NodesDiscovered);
+ if (token == 0)
+ {
+ pDat->NodesDiscovered++;
+ ASSERT(pDat->NodesDiscovered < pDat->nb->maxNodes);
+ /* Check the capability of northbridges against the currently known configuration */
+ if (!pDat->nb->isCapable(currentNode+1, pDat, pDat->nb))
+ {
+ u8 nodeToKill;
+
+ /* Notify BIOS of event */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventCohMpCapMismatch evt = {sizeof(sHtEventCohMpCapMismatch),
+ currentNode,
+ currentLink,
+ pDat->sysMpCap,
+ pDat->NodesDiscovered};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_COH_MPCAP_MISMATCH,
+ (u8 *)&evt);
+ }
+
+ pDat->NodesDiscovered = 0;
+ currentNode = 0;
+ pDat->TotalLinks = 0;
+
+ for (nodeToKill = 0; nodeToKill < pDat->nb->maxNodes; nodeToKill++)
+ {
+ pDat->nb->writeFullRoutingTable(0, nodeToKill, ROUTETOSELF, ROUTETOSELF, 0, pDat->nb);
+ }
+
+ /* End Coherent Discovery */
+ STOP_HERE;
+ break;
+ }
+
+ token = pDat->NodesDiscovered;
+ pDat->nb->writeToken(currentNode+1, token, pDat->nb);
+ /* Inform that we have discovered a node, so that logical id to
+ * socket mapping info can be recorded.
+ */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventCohNodeDiscovered evt = {sizeof(sHtEventCohNodeDiscovered),
+ currentNode,
+ currentLink,
+ token};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_INFO,
+ HT_EVENT_COH_NODE_DISCOVERED,
+ (u8 *)&evt);
+ }
+ }
+
+ if (pDat->TotalLinks == MAX_PLATFORM_LINKS)
+ {
+ /*
+ * Exceeded our capacity to describe all coherent links found in the system.
+ * Error strategy:
+ * Auto recovery is not possible because data space is already all used.
+ * If the callback is not implemented or returns we will continue to initialize
+ * the fabric we are capable of representing, adding no more nodes or links.
+ * This should yield a bootable topology, but likely not the one intended.
+				 * We cannot continue discovery; there may not be any way to route a new
+				 * node back to the BSP if we can't add links to our representation of the system.
+ */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventCohLinkExceed evt = {sizeof(sHtEventCohLinkExceed),
+ currentNode,
+ currentLink,
+ token,
+ pDat->NodesDiscovered,
+ pDat->nb->maxLinks};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_COH_LINK_EXCEED,
+ (u8 *)&evt);
+ }
+ /* Force link and node loops to halt */
+ STOP_HERE;
+ currentNode = pDat->NodesDiscovered;
+ break;
+ }
+
+ pDat->PortList[pDat->TotalLinks*2].Type = PORTLIST_TYPE_CPU;
+ pDat->PortList[pDat->TotalLinks*2].Link = currentLink;
+ pDat->PortList[pDat->TotalLinks*2].NodeID = currentNode;
+
+ pDat->PortList[pDat->TotalLinks*2+1].Type = PORTLIST_TYPE_CPU;
+ pDat->PortList[pDat->TotalLinks*2+1].Link = pDat->nb->readDefLnk(currentNode+1, pDat->nb);
+ pDat->PortList[pDat->TotalLinks*2+1].NodeID = token;
+
+ pDat->TotalLinks++;
+
+ if ( !pDat->sysMatrix[currentNode][token] )
+ {
+ pDat->sysDegree[currentNode]++;
+ pDat->sysDegree[token]++;
+ pDat->sysMatrix[currentNode][token] = TRUE;
+ pDat->sysMatrix[token][currentNode] = TRUE;
+ }
+ }
+ currentNode++;
+ }
+}
+
+
+/***************************************************************************
+ *** ISOMORPHISM BASED ROUTING TABLE GENERATION CODE ***
+ ***************************************************************************/
+
+/*----------------------------------------------------------------------------------------
+ * BOOL
+ * isoMorph(u8 i, sMainData *pDat)
+ *
+ * Description:
+ * Is graphA isomorphic to graphB?
+ * if this function returns true, then Perm will contain the permutation
+ * required to transform graphB into graphA.
+ * We also use the degree of each node, that is the number of connections it has, to
+ * speed up rejection of non-isomorphic graphs (if there is a node in graphA with n
+ *	connections, there must be at least one unmatched node in graphB with n connections).
+ *
+ * Parameters:
+ * @param[in] u8 i = the discovered node which we are trying to match
+ *			with a permutation of the topology
+ * @param[in]/@param[out] sMainData* pDat = our global state, degree and adjacency matrix,
+ * output a permutation if successful
+ * @param[out] BOOL results = the graphs are (or are not) isomorphic
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL isoMorph(u8 i, sMainData *pDat)
+{
+ u8 j, k;
+ u8 nodecnt;
+
+ /* We have only been called if nodecnt == pSelected->size ! */
+ nodecnt = pDat->NodesDiscovered+1;
+
+ if (i != nodecnt)
+ {
+ // Keep building the permutation
+ for (j = 0; j < nodecnt; j++)
+ {
+ // Make sure the degree matches
+ if (pDat->sysDegree[i] != pDat->dbDegree[j])
+ continue;
+
+ // Make sure that j hasn't been used yet (ought to use a "used"
+ // array instead, might be faster)
+ for (k = 0; k < i; k++)
+ {
+ if (pDat->Perm[k] == j)
+ break;
+ }
+ if (k != i)
+ continue;
+ pDat->Perm[i] = j;
+ if (isoMorph(i+1, pDat))
+ return TRUE;
+ }
+ return FALSE;
+ } else {
+ // Test to see if the permutation is isomorphic
+ for (j = 0; j < nodecnt; j++)
+ {
+ for (k = 0; k < nodecnt; k++)
+ {
+ if ( pDat->sysMatrix[j][k] !=
+ pDat->dbMatrix[pDat->Perm[j]][pDat->Perm[k]] )
+ return FALSE;
+ }
+ }
+ return TRUE;
+ }
+}
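+
+/* Example (hypothetical): if a discovered 4-node fabric matches a topology with
+ * Perm = {0, 2, 1, 3}, then discovered node 1 plays the role of abstract topology
+ * node 2. The route targets returned by the graph accessors are abstract node
+ * numbers, so they are mapped back through ReversePerm (here ReversePerm[2] == 1)
+ * before being converted to links on the real fabric.
+ */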
+
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * lookupComputeAndLoadRoutingTables(sMainData *pDat)
+ *
+ * Description:
+ * Using the description of the fabric topology we discovered, try to find a match
+ * among the supported topologies. A supported topology description matches
+ * the discovered fabric if the nodes can be matched in such a way that all the nodes connected
+ * in one set are exactly the nodes connected in the other (formally, that the graphs are
+ * isomorphic). Which links are used is not really important to matching. If the graphs
+ * match, then there is a permutation of one that translates the node positions and linkages
+ * to the other.
+ *
+ * In order to make the isomorphism test efficient, we test for matched number of nodes
+ * (a 4 node fabric is not isomorphic to a 2 node topology), and provide degrees of nodes
+ * to the isomorphism test.
+ *
+ * The generic routing table solution for any topology is predetermined and represented
+ * as part of the topology. The permutation we computed tells us how to interpret the
+ * routing onto the fabric we discovered. We do this working backward from the last
+ * node discovered to the BSP, writing the routing tables as we go.
+ *
+ * Parameters:
+ *	@param[in]/@param[out] sMainData* pDat = our global state, the discovered fabric;
+ *				outputs the degree matrix and permutation
+ * ---------------------------------------------------------------------------------------
+ */
+void lookupComputeAndLoadRoutingTables(sMainData *pDat)
+{
+ u8 **pTopologyList;
+ u8 *pSelected;
+
+ int i, j, k, size;
+
+ size = pDat->NodesDiscovered + 1;
+ /* Use the provided topology list or the internal, default one. */
+ pTopologyList = pDat->HtBlock->topolist;
+ if (pTopologyList == NULL)
+ {
+ getAmdTopolist(&pTopologyList);
+ }
+
+ pSelected = *pTopologyList;
+ while (pSelected != NULL)
+ {
+ if (graphHowManyNodes(pSelected) == size)
+ {
+			// Build Degree vector and Adjacency Matrix for this entry
+ for (i = 0; i < size; i++)
+ {
+ pDat->dbDegree[i] = 0;
+ for (j = 0; j < size; j++)
+ {
+ if (graphIsAdjacent(pSelected, i, j))
+ {
+ pDat->dbMatrix[i][j] = 1;
+ pDat->dbDegree[i]++;
+ }
+ else
+ {
+ pDat->dbMatrix[i][j] = 0;
+ }
+ }
+ }
+ if (isoMorph(0, pDat))
+ break; // A matching topology was found
+ }
+
+ pTopologyList++;
+ pSelected = *pTopologyList;
+ }
+
+ if (pSelected != NULL)
+ {
+ // Compute the reverse Permutation
+ for (i = 0; i < size; i++)
+ {
+ pDat->ReversePerm[pDat->Perm[i]] = i;
+ }
+
+ // Start with the last discovered node, and move towards the BSP
+ for (i = size-1; i >= 0; i--)
+ {
+ for (j = 0; j < size; j++)
+ {
+ u8 ReqTargetLink, RspTargetLink;
+ u8 ReqTargetNode, RspTargetNode;
+
+ u8 AbstractBcTargetNodes = graphGetBc(pSelected, pDat->Perm[i], pDat->Perm[j]);
+ u32 BcTargetLinks = 0;
+
+ for (k = 0; k < MAX_NODES; k++)
+ {
+ if (AbstractBcTargetNodes & ((u32)1<<k))
+ {
+ BcTargetLinks |= (u32)1 << convertNodeToLink(i, pDat->ReversePerm[k], pDat);
+ }
+ }
+
+ if (i == j)
+ {
+ ReqTargetLink = ROUTETOSELF;
+ RspTargetLink = ROUTETOSELF;
+ }
+ else
+ {
+ ReqTargetNode = graphGetReq(pSelected, pDat->Perm[i], pDat->Perm[j]);
+ ReqTargetLink = convertNodeToLink(i, pDat->ReversePerm[ReqTargetNode], pDat);
+
+ RspTargetNode = graphGetRsp(pSelected, pDat->Perm[i], pDat->Perm[j]);
+ RspTargetLink = convertNodeToLink(i, pDat->ReversePerm[RspTargetNode], pDat);
+ }
+
+ pDat->nb->writeFullRoutingTable(i, j, ReqTargetLink, RspTargetLink, BcTargetLinks, pDat->nb);
+ }
+			/* Clean up the discovery 'footprint' that otherwise remains in the routing table. It doesn't hurt
+			 * anything, but might cause confusion during debug and validation. Do this by setting the
+			 * route back to all self routes. Since it's the node one past the last actually installed,
+			 * this only applies if fewer than maxNodes were found.
+ */
+ if (size < pDat->nb->maxNodes)
+ {
+ pDat->nb->writeFullRoutingTable(i, size, ROUTETOSELF, ROUTETOSELF, 0, pDat->nb);
+ }
+ }
+
+ }
+ else
+ {
+ /*
+ * No Matching Topology was found
+ * Error Strategy:
+		 *	Auto recovery doesn't seem likely; force boot as 1P.
+		 *	For reporting and logging, provide the number of nodes.
+		 *	If the call back is not implemented or returns, boot as a BSP uniprocessor.
+ */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventCohNoTopology evt = {sizeof(sHtEventCohNoTopology),
+ pDat->NodesDiscovered};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_COH_NO_TOPOLOGY,
+ (u8 *)&evt);
+ }
+ STOP_HERE;
+ /* Force 1P */
+ pDat->NodesDiscovered = 0;
+ pDat->TotalLinks = 0;
+ pDat->nb->enableRoutingTables(0, pDat->nb);
+ }
+}
+#endif /* HT_BUILD_NC_ONLY */
+
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * finializeCoherentInit(sMainData *pDat)
+ *
+ * Description:
+ * Find the total number of cores and update the number of nodes and cores in all cpus.
+ * Limit cpu config access to installed cpus.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state, number of nodes discovered.
+ * ---------------------------------------------------------------------------------------
+ */
+void finializeCoherentInit(sMainData *pDat)
+{
+ u8 curNode;
+
+ u8 totalCores = 0;
+ for (curNode = 0; curNode < pDat->NodesDiscovered+1; curNode++)
+ {
+ totalCores += pDat->nb->getNumCoresOnNode(curNode, pDat->nb);
+ }
+
+ for (curNode = 0; curNode < pDat->NodesDiscovered+1; curNode++)
+ {
+ pDat->nb->setTotalNodesAndCores(curNode, pDat->NodesDiscovered+1, totalCores, pDat->nb);
+ }
+
+ for (curNode = 0; curNode < pDat->NodesDiscovered+1; curNode++)
+ {
+ pDat->nb->limitNodes(curNode, pDat->nb);
+ }
+
+}
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * coherentInit(sMainData *pDat)
+ *
+ * Description:
+ * Perform discovery and initialization of the coherent fabric.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void coherentInit(sMainData *pDat)
+{
+ int i, j;
+
+#ifdef HT_BUILD_NC_ONLY
+ /* Replace discovery process with:
+ * No other nodes, no coherent links
+	 * Enable routing tables on node 0, for power on self route
+ */
+ pDat->NodesDiscovered = 0;
+ pDat->TotalLinks = 0;
+ pDat->nb->enableRoutingTables(0, pDat->nb);
+#else
+ pDat->NodesDiscovered = 0;
+ pDat->TotalLinks = 0;
+ for (i = 0; i < MAX_NODES; i++)
+ {
+ pDat->sysDegree[i] = 0;
+ for (j = 0; j < MAX_NODES; j++)
+ {
+ pDat->sysMatrix[i][j] = 0;
+ }
+ }
+
+ htDiscoveryFloodFill(pDat);
+ lookupComputeAndLoadRoutingTables(pDat);
+#endif
+ finializeCoherentInit(pDat);
+}
+
+/***************************************************************************
+ *** Non-coherent init code ***
+ *** Algorithms ***
+ ***************************************************************************/
+/*----------------------------------------------------------------------------------------
+ * void
+ * processLink(u8 node, u8 link, sMainData *pDat)
+ *
+ * Description:
+ * Process a non-coherent link, enabling a range of bus numbers, and setting the device
+ * ID for all devices found
+ *
+ * Parameters:
+ * @param[in] u8 node = Node on which to process nc init
+ * @param[in] u8 link = The non-coherent link on that node
+ * @param[in] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void processLink(u8 node, u8 link, sMainData *pDat)
+{
+ u8 secBus, subBus;
+ u32 currentBUID;
+ u32 temp;
+ u32 unitIDcnt;
+ SBDFO currentPtr;
+ u8 depth;
+ u8 *pSwapPtr;
+
+ SBDFO lastSBDFO = ILLEGAL_SBDFO;
+ u8 lastLink = 0;
+
+ ASSERT(node < pDat->nb->maxNodes && link < pDat->nb->maxLinks);
+
+ if ((pDat->HtBlock->AMD_CB_OverrideBusNumbers == NULL)
+ || !pDat->HtBlock->AMD_CB_OverrideBusNumbers(node, link, &secBus, &subBus))
+ {
+ /* Assign Bus numbers */
+ if (pDat->AutoBusCurrent >= pDat->HtBlock->AutoBusMax)
+ {
+			/* If we run out of bus numbers, notify; if the call back is unimplemented
+			 * or returns, skip this chain
+			 */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHTEventNcohBusMaxExceed evt = {sizeof(sHTEventNcohBusMaxExceed), node, link, pDat->AutoBusCurrent};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,HT_EVENT_NCOH_BUS_MAX_EXCEED,(u8 *)&evt);
+ }
+ STOP_HERE;
+ return;
+ }
+
+ if (pDat->UsedCfgMapEntires >= 4)
+ {
+ /* If we have used all the PCI Config maps we can't add another chain.
+			 * Notify; if the call back is unimplemented or returns, skip this chain.
+ */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventNcohCfgMapExceed evt = {sizeof(sHtEventNcohCfgMapExceed), node, link};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_NCOH_CFG_MAP_EXCEED,
+ (u8 *)&evt);
+ }
+ STOP_HERE;
+ return;
+ }
+
+ secBus = pDat->AutoBusCurrent;
+ subBus = secBus + pDat->HtBlock->AutoBusIncrement-1;
+ pDat->AutoBusCurrent += pDat->HtBlock->AutoBusIncrement;
+ }
+
+ pDat->nb->setCFGAddrMap(pDat->UsedCfgMapEntires, secBus, subBus, node, link, pDat, pDat->nb);
+ pDat->UsedCfgMapEntires++;
+
+ if ((pDat->HtBlock->AMD_CB_ManualBUIDSwapList != NULL)
+ && pDat->HtBlock->AMD_CB_ManualBUIDSwapList(node, link, &pSwapPtr))
+ {
+ /* Manual non-coherent BUID assignment */
+
+ /* Assign BUID's per manual override */
+ while (*pSwapPtr != 0xFF)
+ {
+ currentPtr = MAKE_SBDFO(0, secBus, *pSwapPtr, 0, 0);
+ pSwapPtr++;
+
+ do
+ {
+ AmdPCIFindNextCap(&currentPtr);
+ ASSERT(currentPtr != ILLEGAL_SBDFO);
+ AmdPCIRead(currentPtr, &temp);
+ } while (!IS_HT_SLAVE_CAPABILITY(temp));
+
+ currentBUID = *pSwapPtr;
+ pSwapPtr++;
+ AmdPCIWriteBits(currentPtr, 20, 16, &currentBUID);
+ }
+
+ /* Build chain of devices */
+ depth = 0;
+ pSwapPtr++;
+ while (*pSwapPtr != 0xFF)
+ {
+ pDat->PortList[pDat->TotalLinks*2].NodeID = node;
+ if (depth == 0)
+ {
+ pDat->PortList[pDat->TotalLinks*2].Type = PORTLIST_TYPE_CPU;
+ pDat->PortList[pDat->TotalLinks*2].Link = link;
+ }
+ else
+ {
+ pDat->PortList[pDat->TotalLinks*2].Type = PORTLIST_TYPE_IO;
+ pDat->PortList[pDat->TotalLinks*2].Link = 1-lastLink;
+ pDat->PortList[pDat->TotalLinks*2].HostLink = link;
+ pDat->PortList[pDat->TotalLinks*2].HostDepth = depth-1;
+ pDat->PortList[pDat->TotalLinks*2].Pointer = lastSBDFO;
+ }
+
+ pDat->PortList[pDat->TotalLinks*2+1].Type = PORTLIST_TYPE_IO;
+ pDat->PortList[pDat->TotalLinks*2+1].NodeID = node;
+ pDat->PortList[pDat->TotalLinks*2+1].HostLink = link;
+ pDat->PortList[pDat->TotalLinks*2+1].HostDepth = depth;
+
+ currentPtr = MAKE_SBDFO(0, secBus, (*pSwapPtr & 0x3F), 0, 0);
+ do
+ {
+ AmdPCIFindNextCap(&currentPtr);
+ ASSERT(currentPtr != ILLEGAL_SBDFO);
+ AmdPCIRead(currentPtr, &temp);
+ } while (!IS_HT_SLAVE_CAPABILITY(temp));
+ pDat->PortList[pDat->TotalLinks*2+1].Pointer = currentPtr;
+ lastSBDFO = currentPtr;
+
+ /* Bit 6 indicates whether orientation override is desired.
+ * Bit 7 indicates the upstream link if overriding.
+ */
+ /* assert catches at least the one known incorrect setting */
+ ASSERT ((*pSwapPtr & 0x40) || (!(*pSwapPtr & 0x80)));
+ if (*pSwapPtr & 0x40)
+ {
+ /* Override the device's orientation */
+ lastLink = *pSwapPtr >> 7;
+ }
+ else
+ {
+ /* Detect the device's orientation */
+ AmdPCIReadBits(currentPtr, 26, 26, &temp);
+ lastLink = (u8)temp;
+ }
+ pDat->PortList[pDat->TotalLinks*2+1].Link = lastLink;
+
+ depth++;
+ pDat->TotalLinks++;
+ pSwapPtr++;
+ }
+ }
+ else
+ {
+ /* Automatic non-coherent device detection */
+ depth = 0;
+ currentBUID = 1;
+ while (1)
+ {
+ currentPtr = MAKE_SBDFO(0, secBus, 0, 0, 0);
+
+ AmdPCIRead(currentPtr, &temp);
+ if (temp == 0xFFFFFFFF)
+ /* No device found at currentPtr */
+ break;
+
+ if (pDat->TotalLinks == MAX_PLATFORM_LINKS)
+ {
+ /*
+ * Exceeded our capacity to describe all non-coherent links found in the system.
+ * Error strategy:
+ * Auto recovery is not possible because data space is already all used.
+ */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventNcohLinkExceed evt = {sizeof(sHtEventNcohLinkExceed),
+ node,
+ link,
+ depth,
+ pDat->nb->maxLinks};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,
+ HT_EVENT_NCOH_LINK_EXCEED,
+ (u8 *)&evt);
+ }
+ /* Force link loop to halt */
+ STOP_HERE;
+ break;
+ }
+
+ pDat->PortList[pDat->TotalLinks*2].NodeID = node;
+ if (depth == 0)
+ {
+ pDat->PortList[pDat->TotalLinks*2].Type = PORTLIST_TYPE_CPU;
+ pDat->PortList[pDat->TotalLinks*2].Link = link;
+ }
+ else
+ {
+ pDat->PortList[pDat->TotalLinks*2].Type = PORTLIST_TYPE_IO;
+ pDat->PortList[pDat->TotalLinks*2].Link = 1-lastLink;
+ pDat->PortList[pDat->TotalLinks*2].HostLink = link;
+ pDat->PortList[pDat->TotalLinks*2].HostDepth = depth-1;
+ pDat->PortList[pDat->TotalLinks*2].Pointer = lastSBDFO;
+ }
+
+ pDat->PortList[pDat->TotalLinks*2+1].Type = PORTLIST_TYPE_IO;
+ pDat->PortList[pDat->TotalLinks*2+1].NodeID = node;
+ pDat->PortList[pDat->TotalLinks*2+1].HostLink = link;
+ pDat->PortList[pDat->TotalLinks*2+1].HostDepth = depth;
+
+ do
+ {
+ AmdPCIFindNextCap(&currentPtr);
+ ASSERT(currentPtr != ILLEGAL_SBDFO);
+ AmdPCIRead(currentPtr, &temp);
+ } while (!IS_HT_SLAVE_CAPABILITY(temp));
+
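+			/* Unit IDs are 5 bits wide, so the chain must stay within
+			 * range; the stricter limit of 24 on bus 0 keeps assigned
+			 * unit IDs clear of the processors' config devices, which
+			 * occupy device 0x18 and above on bus 0.
+			 */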
+ AmdPCIReadBits(currentPtr, 25, 21, &unitIDcnt);
+ if ((unitIDcnt + currentBUID > 31) || ((secBus == 0) && (unitIDcnt + currentBUID > 24)))
+ {
+ /* An error handler for the case where we run out of BUID's on a chain */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventNcohBuidExceed evt = {sizeof(sHtEventNcohBuidExceed),
+ node, link, depth, (u8)currentBUID, (u8)unitIDcnt};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,HT_EVENT_NCOH_BUID_EXCEED,(u8 *)&evt);
+ }
+ STOP_HERE;
+ break;
+ }
+ AmdPCIWriteBits(currentPtr, 20, 16, &currentBUID);
+
+
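+			/* Re-address the capability at the device's new unit ID and
+			 * read the BUID field back to confirm the assignment took.
+			 */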
+ currentPtr += MAKE_SBDFO(0, 0, currentBUID, 0, 0);
+ AmdPCIReadBits(currentPtr, 20, 16, &temp);
+ if (temp != currentBUID)
+ {
+ /* An error handler for this critical error */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventNcohDeviceFailed evt = {sizeof(sHtEventNcohDeviceFailed),
+ node, link, depth, (u8)currentBUID};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_ERROR,HT_EVENT_NCOH_DEVICE_FAILED,(u8 *)&evt);
+ }
+ STOP_HERE;
+ break;
+ }
+
+ AmdPCIReadBits(currentPtr, 26, 26, &temp);
+ pDat->PortList[pDat->TotalLinks*2+1].Link = (u8)temp;
+ pDat->PortList[pDat->TotalLinks*2+1].Pointer = currentPtr;
+
+ lastLink = (u8)temp;
+ lastSBDFO = currentPtr;
+
+ depth++;
+ pDat->TotalLinks++;
+ currentBUID += unitIDcnt;
+ }
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ /* Provide information on automatic device results */
+ sHtEventNcohAutoDepth evt = {sizeof(sHtEventNcohAutoDepth), node, link, (depth - 1)};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_INFO,HT_EVENT_NCOH_AUTO_DEPTH,(u8 *)&evt);
+ }
+ }
+}
+
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * ncInit(sMainData *pDat)
+ *
+ * Description:
+ * Initialize the non-coherent fabric. Begin with the compat link on the BSP, then
+ * find and initialize all other non-coherent chains.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void ncInit(sMainData *pDat)
+{
+ u8 node, link;
+ u8 compatLink;
+
+ compatLink = pDat->nb->readSbLink(pDat->nb);
+ processLink(0, compatLink, pDat);
+
+ for (node = 0; node <= pDat->NodesDiscovered; node++)
+ {
+ for (link = 0; link < pDat->nb->maxLinks; link++)
+ {
+ if (pDat->HtBlock->AMD_CB_IgnoreLink && pDat->HtBlock->AMD_CB_IgnoreLink(node, link))
+ continue; // Skip the link
+
+ if (node == 0 && link == compatLink)
+ continue;
+
+ if (pDat->nb->readTrueLinkFailStatus(node, link, pDat, pDat->nb))
+ continue;
+
+ if (pDat->nb->verifyLinkIsNonCoherent(node, link, pDat->nb))
+ processLink(node, link, pDat);
+ }
+ }
+}
+
+/***************************************************************************
+ *** Link Optimization ***
+ ***************************************************************************/
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * regangLinks(sMainData *pDat)
+ *
+ * Description:
+ * Test the sublinks of a link to see if they qualify to be reganged. If they do,
+ * update the port list data to indicate that this should be done. Note that no
+ * actual hardware state is changed in this routine.
+ *
+ * Parameters:
+ * @param[in,out] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void regangLinks(sMainData *pDat)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u8 i, j;
+ for (i = 0; i < pDat->TotalLinks*2; i += 2)
+ {
+ ASSERT(pDat->PortList[i].Type < 2 && pDat->PortList[i].Link < pDat->nb->maxLinks); // Data validation
+ ASSERT(pDat->PortList[i+1].Type < 2 && pDat->PortList[i+1].Link < pDat->nb->maxLinks); // data validation
+ ASSERT(!(pDat->PortList[i].Type == PORTLIST_TYPE_IO && pDat->PortList[i+1].Type == PORTLIST_TYPE_CPU)); // ensure src is closer to the bsp than dst
+
+ /* Regang is false unless we pass all conditions below */
+ pDat->PortList[i].SelRegang = FALSE;
+ pDat->PortList[i+1].SelRegang = FALSE;
+
+ if ( (pDat->PortList[i].Type != PORTLIST_TYPE_CPU) || (pDat->PortList[i+1].Type != PORTLIST_TYPE_CPU))
+ continue; // Only process cpu to cpu links
+
+ for (j = i+2; j < pDat->TotalLinks*2; j += 2)
+ {
+ if ( (pDat->PortList[j].Type != PORTLIST_TYPE_CPU) || (pDat->PortList[j+1].Type != PORTLIST_TYPE_CPU) )
+ continue; // Only process cpu to cpu links
+
+ if (pDat->PortList[i].NodeID != pDat->PortList[j].NodeID)
+ continue; // Links must be from the same source
+
+ if (pDat->PortList[i+1].NodeID != pDat->PortList[j+1].NodeID)
+ continue; // Link must be to the same target
+
+ if ((pDat->PortList[i].Link & 3) != (pDat->PortList[j].Link & 3))
+ continue; // Ensure same source base port
+
+ if ((pDat->PortList[i+1].Link & 3) != (pDat->PortList[j+1].Link & 3))
+ continue; // Ensure same destination base port
+
+ if ((pDat->PortList[i].Link & 4) != (pDat->PortList[i+1].Link & 4))
+ continue; // Ensure sublink0 routes to sublink0
+
+ ASSERT((pDat->PortList[j].Link & 4) == (pDat->PortList[j+1].Link & 4)); // (therefore sublink1 routes to sublink1)
+
+ if (pDat->HtBlock->AMD_CB_SkipRegang &&
+ pDat->HtBlock->AMD_CB_SkipRegang(pDat->PortList[i].NodeID,
+ pDat->PortList[i].Link & 0x03,
+ pDat->PortList[i+1].NodeID,
+ pDat->PortList[i+1].Link & 0x03))
+ {
+ continue; // Skip regang
+ }
+
+
+ pDat->PortList[i].Link &= 0x03; // Force to point to sublink0
+ pDat->PortList[i+1].Link &= 0x03;
+ pDat->PortList[i].SelRegang = TRUE; // Enable link reganging
+ pDat->PortList[i+1].SelRegang = TRUE;
+ pDat->PortList[i].PrvWidthOutCap = HT_WIDTH_16_BITS;
+ pDat->PortList[i+1].PrvWidthOutCap = HT_WIDTH_16_BITS;
+ pDat->PortList[i].PrvWidthInCap = HT_WIDTH_16_BITS;
+ pDat->PortList[i+1].PrvWidthInCap = HT_WIDTH_16_BITS;
+
+ // Delete PortList[j, j+1], slow but easy to debug implementation
+ pDat->TotalLinks--;
+ Amdmemcpy(&(pDat->PortList[j]), &(pDat->PortList[j+2]), sizeof(sPortDescriptor)*(pDat->TotalLinks*2-j));
+ Amdmemset(&(pDat->PortList[pDat->TotalLinks*2]), INVALID_LINK, sizeof(sPortDescriptor)*2);
+
+			////High performance, but would make debugging harder due to 'shuffling' of the records
+ ////Amdmemcpy(PortList[TotalPorts-2], PortList[j], SIZEOF(sPortDescriptor)*2);
+ ////TotalPorts -=2;
+
+ break; // Exit loop, advance to PortList[i+2]
+ }
+ }
+#endif /* HT_BUILD_NC_ONLY */
+}
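+
+/* Note on sublink numbering used above and in hammerSublinkFixup() below:
+ * link values 0-3 refer to sublink 0 of ports 0-3, and values 4-7 refer to
+ * sublink 1 of the same ports, so (Link & 3) selects the base port and bit 2
+ * distinguishes the sublink.
+ */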
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * selectOptimalWidthAndFrequency(sMainData *pDat)
+ *
+ * Description:
+ * For all links:
+ * Examine both sides of a link and determine the optimal frequency and width,
+ * taking into account externally provided limits and enforcing any other limit
+ * or matching rules as applicable except sublink balancing. Update the port
+ *	list data with the optimal settings.
+ *	Note that no hardware state is changed in this routine.
+ *
+ * Parameters:
+ * @param[in,out] sMainData* pDat = our global state, port list data
+ * ---------------------------------------------------------------------------------------
+ */
+void selectOptimalWidthAndFrequency(sMainData *pDat)
+{
+ u8 i, j;
+ u32 temp;
+ u16 cbPCBFreqLimit;
+ u8 cbPCBABDownstreamWidth;
+ u8 cbPCBBAUpstreamWidth;
+
+ for (i = 0; i < pDat->TotalLinks*2; i += 2)
+ {
+ cbPCBFreqLimit = 0xFFFF;
+ cbPCBABDownstreamWidth = 16;
+ cbPCBBAUpstreamWidth = 16;
+
+ if ( (pDat->PortList[i].Type == PORTLIST_TYPE_CPU) && (pDat->PortList[i+1].Type == PORTLIST_TYPE_CPU))
+ {
+ if (pDat->HtBlock->AMD_CB_Cpu2CpuPCBLimits)
+ {
+ pDat->HtBlock->AMD_CB_Cpu2CpuPCBLimits(
+ pDat->PortList[i].NodeID,
+ pDat->PortList[i].Link,
+ pDat->PortList[i+1].NodeID,
+ pDat->PortList[i+1].Link,
+ &cbPCBABDownstreamWidth,
+ &cbPCBBAUpstreamWidth, &cbPCBFreqLimit
+ );
+ }
+ }
+ else
+ {
+ if (pDat->HtBlock->AMD_CB_IOPCBLimits)
+ {
+ pDat->HtBlock->AMD_CB_IOPCBLimits(
+ pDat->PortList[i+1].NodeID,
+ pDat->PortList[i+1].HostLink,
+ pDat->PortList[i+1].HostDepth,
+ &cbPCBABDownstreamWidth,
+ &cbPCBBAUpstreamWidth, &cbPCBFreqLimit
+ );
+ }
+ }
+
+
+ temp = pDat->PortList[i].PrvFrequencyCap;
+ temp &= pDat->PortList[i+1].PrvFrequencyCap;
+ temp &= cbPCBFreqLimit;
+ pDat->PortList[i].CompositeFrequencyCap = (u16)temp;
+ pDat->PortList[i+1].CompositeFrequencyCap = (u16)temp;
+
+ ASSERT (temp != 0);
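+		/* Select the highest frequency remaining in the composite
+		 * capability mask, i.e. the most significant set bit.
+		 */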
+ for (j = 15; ; j--)
+ {
+ if (temp & ((u32)1 << j))
+ break;
+ }
+
+ pDat->PortList[i].SelFrequency = j;
+ pDat->PortList[i+1].SelFrequency = j;
+
+ temp = pDat->PortList[i].PrvWidthOutCap;
+ if (pDat->PortList[i+1].PrvWidthInCap < temp)
+ temp = pDat->PortList[i+1].PrvWidthInCap;
+ if (cbPCBABDownstreamWidth < temp)
+ temp = cbPCBABDownstreamWidth;
+ pDat->PortList[i].SelWidthOut = (u8)temp;
+ pDat->PortList[i+1].SelWidthIn = (u8)temp;
+
+ temp = pDat->PortList[i].PrvWidthInCap;
+ if (pDat->PortList[i+1].PrvWidthOutCap < temp)
+ temp = pDat->PortList[i+1].PrvWidthOutCap;
+ if (cbPCBBAUpstreamWidth < temp)
+ temp = cbPCBBAUpstreamWidth;
+ pDat->PortList[i].SelWidthIn = (u8)temp;
+ pDat->PortList[i+1].SelWidthOut = (u8)temp;
+
+ }
+}
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * hammerSublinkFixup(sMainData *pDat)
+ *
+ * Description:
+ * Iterate through all links, checking the frequency of each sublink pair. Make the
+ * adjustment to the port list data so that the frequencies are at a valid ratio,
+ * reducing frequency as needed to achieve this. (All links support the minimum 200 MHz
+ * frequency.) Repeat the above until no adjustments are needed.
+ * Note no hardware state changes in this routine.
+ *
+ * Parameters:
+ * @param[in,out] sMainData* pDat = our global state, link state and port list
+ * ---------------------------------------------------------------------------------------
+ */
+void hammerSublinkFixup(sMainData *pDat)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u8 i, j, k;
+ BOOL changes, downgrade;
+
+ u8 hiIndex;
+ u8 hiFreq, loFreq;
+
+ u32 temp;
+
+ do
+ {
+ changes = FALSE;
+ for (i = 0; i < pDat->TotalLinks*2; i++)
+ {
+ if (pDat->PortList[i].Type != PORTLIST_TYPE_CPU) // Must be a CPU link
+ continue;
+			if (pDat->PortList[i].Link < 4) // Only look for sublink1's
+ continue;
+
+ for (j = 0; j < pDat->TotalLinks*2; j++)
+ {
+ // Step 1. Find the matching sublink0
+ if (pDat->PortList[j].Type != PORTLIST_TYPE_CPU)
+ continue;
+ if (pDat->PortList[j].NodeID != pDat->PortList[i].NodeID)
+ continue;
+ if (pDat->PortList[j].Link != (pDat->PortList[i].Link & 0x03))
+ continue;
+
+ // Step 2. Check for an illegal frequency ratio
+ if (pDat->PortList[i].SelFrequency >= pDat->PortList[j].SelFrequency)
+ {
+ hiIndex = i;
+ hiFreq = pDat->PortList[i].SelFrequency;
+ loFreq = pDat->PortList[j].SelFrequency;
+ }
+ else
+ {
+ hiIndex = j;
+ hiFreq = pDat->PortList[j].SelFrequency;
+ loFreq = pDat->PortList[i].SelFrequency;
+ }
+
+ if (hiFreq == loFreq)
+ break; // The frequencies are 1:1, no need to do anything
+
+ downgrade = FALSE;
+
+ if (hiFreq == 13)
+ {
+ if ((loFreq != 7) && //{13, 7} 2400MHz / 1200MHz 2:1
+ (loFreq != 4) && //{13, 4} 2400MHz / 600MHz 4:1
+ (loFreq != 2) ) //{13, 2} 2400MHz / 400MHz 6:1
+ downgrade = TRUE;
+ }
+ else if (hiFreq == 11)
+ {
+ if ((loFreq != 6)) //{11, 6} 2000MHz / 1000MHz 2:1
+ downgrade = TRUE;
+ }
+ else if (hiFreq == 9)
+ {
+ if ((loFreq != 5) && //{ 9, 5} 1600MHz / 800MHz 2:1
+ (loFreq != 2) && //{ 9, 2} 1600MHz / 400MHz 4:1
+					(loFreq != 0) ) //{ 9, 0} 1600MHz / 200MHz 8:1
+ downgrade = TRUE;
+ }
+ else if (hiFreq == 7)
+ {
+ if ((loFreq != 4) && //{ 7, 4} 1200MHz / 600MHz 2:1
+ (loFreq != 0) ) //{ 7, 0} 1200MHz / 200MHz 6:1
+ downgrade = TRUE;
+ }
+ else if (hiFreq == 5)
+ {
+ if ((loFreq != 2) && //{ 5, 2} 800MHz / 400MHz 2:1
+ (loFreq != 0) ) //{ 5, 0} 800MHz / 200MHz 4:1
+ downgrade = TRUE;
+ }
+ else if (hiFreq == 2)
+ {
+ if ((loFreq != 0)) //{ 2, 0} 400MHz / 200MHz 2:1
+ downgrade = TRUE;
+ }
+ else
+ {
+ downgrade = TRUE; // no legal ratios for hiFreq
+ }
+
+				// Step 3. Downgrade the higher of the two frequencies, and set changes to TRUE
+ if (downgrade)
+ {
+ // Although the problem was with the port specified by hiIndex, we need to
+ // downgrade both ends of the link.
+ hiIndex = hiIndex & 0xFE; // Select the 'upstream' (i.e. even) port
+
+ temp = pDat->PortList[hiIndex].CompositeFrequencyCap;
+
+ // Remove hiFreq from the list of valid frequencies
+ temp = temp & ~((u32)1 << hiFreq);
+ ASSERT (temp != 0);
+ pDat->PortList[hiIndex].CompositeFrequencyCap = (u16)temp;
+ pDat->PortList[hiIndex+1].CompositeFrequencyCap = (u16)temp;
+
+ for (k = 15; ; k--)
+ {
+ if (temp & ((u32)1 << k))
+ break;
+ }
+
+ pDat->PortList[hiIndex].SelFrequency = k;
+ pDat->PortList[hiIndex+1].SelFrequency = k;
+
+ changes = TRUE;
+ }
+ }
+ }
+ } while (changes); // Repeat until a valid configuration is reached
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * linkOptimization(sMainData *pDat)
+ *
+ * Description:
+ * Based on link capabilities, apply optimization rules to come up with the real best
+ *	settings, including several external limit decisions from call backs. This includes
+ * handling of sublinks. Finally, after the port list data is updated, set the hardware
+ * state for all links.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state
+ * ---------------------------------------------------------------------------------------
+ */
+void linkOptimization(sMainData *pDat)
+{
+ pDat->nb->gatherLinkData(pDat, pDat->nb);
+ regangLinks(pDat);
+ selectOptimalWidthAndFrequency(pDat);
+ hammerSublinkFixup(pDat);
+ pDat->nb->setLinkData(pDat, pDat->nb);
+}
+
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * trafficDistribution(sMainData *pDat)
+ *
+ * Description:
+ * In the case of a two node system with both sublinks used, enable the traffic
+ * distribution feature.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state, port list data
+ * ---------------------------------------------------------------------------------------
+ */
+void trafficDistribution(sMainData *pDat)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 links01, links10;
+ u8 linkCount;
+ u8 i;
+
+ // Traffic Distribution is only used when there are exactly two nodes in the system
+ if (pDat->NodesDiscovered+1 != 2)
+ return;
+
+ links01 = 0;
+ links10 = 0;
+ linkCount = 0;
+ for (i = 0; i < pDat->TotalLinks*2; i += 2)
+ {
+ if ((pDat->PortList[i].Type == PORTLIST_TYPE_CPU) && (pDat->PortList[i+1].Type == PORTLIST_TYPE_CPU))
+ {
+ links01 |= (u32)1 << pDat->PortList[i].Link;
+ links10 |= (u32)1 << pDat->PortList[i+1].Link;
+ linkCount++;
+ }
+ }
+ ASSERT(linkCount != 0);
+ if (linkCount == 1)
+ return; // Don't setup Traffic Distribution if only one link is being used
+
+ pDat->nb->writeTrafficDistribution(links01, links10, pDat->nb);
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * tuning(sMainData *pDat)
+ *
+ * Description:
+ * Handle system and performance tunings, such as traffic distribution, fifo and
+ * buffer tuning, and special config tunings.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state, port list data
+ * ---------------------------------------------------------------------------------------
+ */
+void tuning(sMainData *pDat)
+{
+ u8 i;
+
+ /* See if traffic distribution can be done and do it if so
+ * or allow system specific customization
+ */
+ if ((pDat->HtBlock->AMD_CB_CustomizeTrafficDistribution == NULL)
+ || !pDat->HtBlock->AMD_CB_CustomizeTrafficDistribution())
+ {
+ trafficDistribution(pDat);
+ }
+
+ /* For each node, invoke northbridge specific buffer tunings or
+ * system specific customizations.
+ */
+ for (i=0; i < pDat->NodesDiscovered + 1; i++)
+ {
+ if ((pDat->HtBlock->AMD_CB_CustomizeBuffers == NULL)
+ || !pDat->HtBlock->AMD_CB_CustomizeBuffers(i))
+ {
+ pDat->nb->bufferOptimizations(i, pDat, pDat->nb);
+ }
+ }
+}
+
+/*----------------------------------------------------------------------------------------
+ * BOOL
+ * isSanityCheckOk()
+ *
+ * Description:
+ * Perform any general sanity checks which should prevent HT from running if they fail.
+ * Currently only the "Must run on BSP only" check.
+ *
+ * Parameters:
+ *	@param[out] BOOL result = true if check is ok, false if it failed
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL isSanityCheckOk()
+{
+ uint64 qValue;
+
+ AmdMSRRead(APIC_Base, &qValue);
+
+ return ((qValue.lo & ((u32)1 << APIC_Base_BSP)) != 0);
+}
+
+/***************************************************************************
+ *** HT Initialize ***
+ ***************************************************************************/
+
+/*----------------------------------------------------------------------------------------
+ * void
+ * htInitialize(AMD_HTBLOCK *pBlock)
+ *
+ * Description:
+ *	This is the top level external interface for HyperTransport Initialization.
+ * Create our initial internal state, initialize the coherent fabric,
+ * initialize the non-coherent chains, and perform any required fabric tuning or
+ * optimization.
+ *
+ * Parameters:
+ * @param[in] AMD_HTBLOCK* pBlock = Our Initial State including possible
+ * topologies and routings, non coherent bus
+ * assignment info, and actual
+ * wrapper or OEM call back routines.
+ * ---------------------------------------------------------------------------------------
+ */
+void amdHtInitialize(AMD_HTBLOCK *pBlock)
+{
+ sMainData pDat;
+ cNorthBridge nb;
+
+ if (isSanityCheckOk())
+ {
+ newNorthBridge(0, &nb);
+
+ pDat.HtBlock = pBlock;
+ pDat.nb = &nb;
+ pDat.sysMpCap = nb.maxNodes;
+ nb.isCapable(0, &pDat, pDat.nb);
+ coherentInit(&pDat);
+
+ pDat.AutoBusCurrent = pBlock->AutoBusStart;
+ pDat.UsedCfgMapEntires = 0;
+ ncInit(&pDat);
+ linkOptimization(&pDat);
+ tuning(&pDat);
+ }
+}
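+
+/* Minimal caller sketch (illustrative only; the values below are hypothetical
+ * and not necessarily what any particular wrapper uses):
+ *
+ *	AMD_HTBLOCK ht;
+ *
+ *	Amdmemset(&ht, 0, sizeof(ht));	// all call backs and topolist NULL
+ *	ht.AutoBusStart = 1;		// first non-coherent chain starts at bus 1
+ *	ht.AutoBusMax = 32;
+ *	ht.AutoBusIncrement = 6;	// bus numbers reserved per chain
+ *	amdHtInitialize(&ht);		// NULL topolist selects the built-in list
+ */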
diff --git a/src/northbridge/amd/amdht/h3finit.h b/src/northbridge/amd/amdht/h3finit.h
new file mode 100644
index 0000000000..8da1bd2e8c
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3finit.h
@@ -0,0 +1,613 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef H3FINIT_H
+#define H3FINIT_H
+
+/*----------------------------------------------------------------------------
+ * Mixed (DEFINITIONS AND MACROS / TYPEDEFS, STRUCTURES, ENUMS)
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*-----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+/* Width equates for call backs */
+#define HT_WIDTH_8_BITS 8
+#define HT_WIDTH_16_BITS 16
+#define HT_WIDTH_4_BITS 4
+#define HT_WIDTH_2_BITS 2
+
+/* Frequency equates for call backs which take an actual frequency setting */
+#define HT_FREQUENCY_200M 0
+#define HT_FREQUENCY_400M 2
+#define HT_FREQUENCY_600M 4
+#define HT_FREQUENCY_800M 5
+#define HT_FREQUENCY_1000M 6
+#define HT_FREQUENCY_1200M 7
+#define HT_FREQUENCY_1400M 8
+#define HT_FREQUENCY_1600M 9
+#define HT_FREQUENCY_1800M 10
+#define HT_FREQUENCY_2000M 11
+#define HT_FREQUENCY_2200M 12
+#define HT_FREQUENCY_2400M 13
+#define HT_FREQUENCY_2600M 14
+
+/* Frequency Limit equates for call backs which take a frequency supported mask. */
+#define HT_FREQUENCY_LIMIT_200M 1
+#define HT_FREQUENCY_LIMIT_400M 7
+#define HT_FREQUENCY_LIMIT_600M 0x1F
+#define HT_FREQUENCY_LIMIT_800M 0x3F
+#define HT_FREQUENCY_LIMIT_1000M 0x7F
+#define HT_FREQUENCY_LIMIT_HT1_ONLY 0x7F
+#define HT_FREQUENCY_LIMIT_1200M 0xFF
+#define HT_FREQUENCY_LIMIT_1400M 0x1FF
+#define HT_FREQUENCY_LIMIT_1600M 0x3FF
+#define HT_FREQUENCY_LIMIT_1800M 0x7FF
+#define HT_FREQUENCY_LIMIT_2000M 0xFFF
+#define HT_FREQUENCY_LIMIT_2200M 0x1FFF
+#define HT_FREQUENCY_LIMIT_2400M 0x3FFF
+#define HT_FREQUENCY_LIMIT_2600M 0x7FFF
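+
+/* Each limit mask is cumulative: it has a bit set for every HT_FREQUENCY_*
+ * setting at or below the named frequency, e.g. HT_FREQUENCY_LIMIT_800M (0x3F)
+ * covers bits 0 through HT_FREQUENCY_800M (5).
+ */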
+
+/*
+ * Event Notify definitions
+ */
+
+/* Event Class definitions */
+#define HT_EVENT_CLASS_CRITICAL 1
+#define HT_EVENT_CLASS_ERROR 2
+#define HT_EVENT_CLASS_HW_FAULT 3
+#define HT_EVENT_CLASS_WARNING 4
+#define HT_EVENT_CLASS_INFO 5
+
+/* Event definitions. */
+
+/* Coherent subfunction events */
+#define HT_EVENT_COH_EVENTS 0x1000
+#define HT_EVENT_COH_NO_TOPOLOGY 0x1001
+#define HT_EVENT_COH_LINK_EXCEED 0x1002
+#define HT_EVENT_COH_FAMILY_FEUD 0x1003
+#define HT_EVENT_COH_NODE_DISCOVERED 0x1004
+#define HT_EVENT_COH_MPCAP_MISMATCH 0x1005
+
+/* Non-coherent subfunction events */
+#define HT_EVENT_NCOH_EVENTS 0x2000
+#define HT_EVENT_NCOH_BUID_EXCEED 0x2001
+#define HT_EVENT_NCOH_LINK_EXCEED 0x2002
+#define HT_EVENT_NCOH_BUS_MAX_EXCEED 0x2003
+#define HT_EVENT_NCOH_CFG_MAP_EXCEED 0x2004
+#define HT_EVENT_NCOH_DEVICE_FAILED 0x2005
+#define HT_EVENT_NCOH_AUTO_DEPTH 0x2006
+
+/* Optimization subfunction events */
+#define HT_EVENT_OPT_EVENTS 0x3000
+#define HT_EVENT_OPT_REQUIRED_CAP_RETRY 0x3001
+#define HT_EVENT_OPT_REQUIRED_CAP_GEN3 0x3002
+
+/* HW Fault events */
+#define HT_EVENT_HW_EVENTS 0x4000
+#define HT_EVENT_HW_SYNCHFLOOD 0x4001
+#define HT_EVENT_HW_HTCRC 0x4002
+
+/* The bbHT component (hb*) uses 0x5000 for events.
+ * For consistency, we avoid that range here.
+ */
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS, STRUCTURES, ENUMS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+typedef struct {
+ u8 **topolist;
+ u8 AutoBusStart;
+	/* Note: This should always be of the form AutoBusCurrent+N*AutoBusIncrement; buses 253-255 are reserved */
+ u8 AutoBusMax;
+ u8 AutoBusIncrement;
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_IgnoreLink(u8 Node, u8 Link)
+ *
+ * Description:
+ * This routine is called every time a coherent link is found and then every
+ * time a non-coherent link from a CPU is found.
+ * Any coherent or non-coherent link from a CPU can be ignored and not used
+ * for discovery or initialization. Useful for connection based systems.
+ * (Note: not called for IO device to IO Device links.)
+ *
+ * Parameters:
+ * @param[in] u8 node = The node on which this link is located
+ * @param[in] u8 link = The link about to be initialized
+ * @param[out] BOOL result = true to ignore this link and skip it
+ * false to initialize the link normally
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_IgnoreLink)(u8 Node, u8 Link);
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_OverrideBusNumbers(u8 Node, u8 Link, u8 *SecBus, u8 *SubBus)
+ *
+ * Description:
+ * This routine is called every time a non-coherent chain is processed.
+ *	If a system cannot use the auto bus numbering feature for non-coherent chain bus
+ * assignments, this routine can provide explicit control. For each chain, provide
+ * the bus number range to use.
+ *
+ * Parameters:
+ * @param[in] u8 node = The node on which this chain is located
+ * @param[in] u8 link = The link on the host for this chain
+ *	@param[out] u8* secBus = Secondary Bus number for this non-coherent chain
+ * @param[out] u8* subBus = Subordinate Bus number
+ * @param[out] BOOL result = true this routine is supplying the bus numbers
+ * false use auto Bus numbering
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_OverrideBusNumbers)(u8 Node, u8 Link, u8 *SecBus, u8 *SubBus);
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_ManualBUIDSwapList(u8 Node, u8 Link, u8 **List)
+ *
+ * Description:
+ * This routine is called every time a non-coherent chain is processed.
+ * BUID assignment may be controlled explicitly on a non-coherent chain. Provide a
+ * swap list. The first part of the list controls the BUID assignment and the
+ * second part of the list provides the device to device linking. Device orientation
+ *	second part of the list provides the device to device linking. Device orientation
+ *	can be detected automatically or specified explicitly. See the documentation and
+ *	the example below for more details.
+ * Automatic non-coherent init assigns BUIDs starting at 1 and incrementing sequentially
+ * based on each device's unit count.
+ *
+ * Parameters:
+ * @param[in] u8 node = The node on which this chain is located
+ * @param[in] u8 link = The link on the host for this chain
+ * @param[out] u8** list = supply a pointer to a list
+ * @param[out] BOOL result = true to use a manual list
+ * false to initialize the link automatically
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_ManualBUIDSwapList)(u8 Node, u8 Link, u8 **List);
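+
+	/* A hypothetical swap list (format as parsed by processLink() in
+	 * h3finit.c): first (current device number, BUID to assign) pairs,
+	 * then 0xFF, then one entry per device in chain order (bits 5:0 =
+	 * device number, bit 6 = override orientation, bit 7 = upstream link
+	 * when overriding), then a final 0xFF:
+	 *
+	 *	static const u8 swaplist[] = {
+	 *		0x00, 0x01,	// device found at dev 0: assign BUID 1
+	 *		0xFF,		// end of BUID assignment section
+	 *		0x01,		// chain entry: dev 1, auto-detect orientation
+	 *		0xFF		// end of device chain section
+	 *	};
+	 */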
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_DeviceCapOverride(u8 HostNode, u8 HostLink, u8 Depth, u8 Segment,
+ * u8 Bus, u8 Dev, u32 DevVenID, u8 Link,
+ * u8 *LinkWidthIn, u8 *LinkWidthOut, u16 *FreqCap)
+ *
+ * Description:
+ * This routine is called once for every link on every IO device.
+ * Update the width and frequency capability if needed for this device.
+ * This is used along with device capabilities, the limit call backs, and northbridge
+ * limits to compute the default settings. The components of the device's PCI config
+ * address are provided, so its settings can be consulted if need be. The input width
+ * and frequency are the reported device capabilities.
+ *
+ * Parameters:
+ * @param[in] u8 hostNode = The node on which this chain is located
+ * @param[in] u8 hostLink = The link on the host for this chain
+ * @param[in] u8 Depth = The depth in the I/O chain from the Host
+ * @param[in] u8 Segment = The Device's PCI Bus Segment number
+ * @param[in] u8 Bus = The Device's PCI Bus number
+ * @param[in] u8 Dev = The Device's PCI device Number
+ * @param[in] u32 DevVenID = The Device's PCI Vendor + Device ID (offset 0x00)
+ * @param[in] u8 Link = The Device's link number (0 or 1)
+ *	@param[in,out] u8* LinkWidthIn = modify to change the Link Width In
+ *	@param[in,out] u8* LinkWidthOut = modify to change the Link Width Out
+ * @param[in,out] u16* FreqCap = modify to change the link's frequency capability
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_DeviceCapOverride)(
+ u8 HostNode,
+ u8 HostLink,
+ u8 Depth,
+ u8 Segment,
+ u8 Bus,
+ u8 Dev,
+ u32 DevVenID,
+ u8 Link,
+ u8 *LinkWidthIn,
+ u8 *LinkWidthOut,
+ u16 *FreqCap
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_Cpu2CpuPCBLimits(u8 NodeA, u8 LinkA, u8 NodeB, u8 LinkB,
+ * u8 *ABLinkWidthLimit, u8 *BALinkWidthLimit, u16 *PCBFreqCap)
+ *
+ * Description:
+ * For each coherent connection this routine is called once.
+ * Update the frequency and width if needed for this link (usually based on board
+ * restriction). This is used with CPU device capabilities and northbridge limits
+ * to compute the default settings. The input width and frequency are valid, but do
+ * not necessarily reflect the minimum setting that will be chosen.
+ *
+ * Parameters:
+ * @param[in] u8 nodeA = One node on which this link is located
+ * @param[in] u8 linkA = The link on this node
+ * @param[in] u8 nodeB = The other node on which this link is located
+ * @param[in] u8 linkB = The link on that node
+ * @param[in,out] u8* ABLinkWidthLimit = modify to change the A-to-B link width limit
+ * @param[in,out] u8* BALinkWidthLimit = modify to change the B-to-A link width limit
+ * @param[in,out] u16* PCBFreqCap = modify to change the link's frequency capability
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_Cpu2CpuPCBLimits)(
+ u8 NodeA,
+ u8 LinkA,
+ u8 NodeB,
+ u8 LinkB,
+ u8 *ABLinkWidthLimit,
+ u8 *BALinkWidthLimit,
+ u16 *PCBFreqCap
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_IOPCBLimits(u8 HostNode, u8 HostLink, u8 Depth, u8 *DownstreamLinkWidthLimit,
+ * u8 *UpstreamLinkWidthLimit, u16 *PCBFreqCap)
+ *
+ * Description:
+ * For each non-coherent connection this routine is called once.
+ * Update the frequency and width if needed for this link (usually based on board
+ * restriction). This is used with device capabilities, device overrides, and northbridge limits
+ * to compute the default settings. The input width and frequency are valid, but do
+ * not necessarily reflect the minimum setting that will be chosen.
+ *
+ * Parameters:
+ * @param[in] u8 hostNode = The node on which this link is located
+ * @param[in] u8 hostLink = The link about to be initialized
+ * @param[in] u8 Depth = The depth in the I/O chain from the Host
+ * @param[in,out] u8* DownstreamLinkWidthLimit = modify to change the downstream link width limit
+ * @param[in,out] u8* UpstreamLinkWidthLimit = modify to change the upstream link width limit
+ * @param[in,out] u16* PCBFreqCap = modify to change the link's frequency capability
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_IOPCBLimits)(
+ u8 HostNode,
+ u8 HostLink,
+ u8 Depth,
+ u8 *DownstreamLinkWidthLimit,
+ u8 *UpstreamLinkWidthLimit,
+ u16 *PCBFreqCap
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_SkipRegang(u8 NodeA, u8 LinkA, u8 NodeB, u8 LinkB)
+ *
+ * Description:
+ * This routine is called whenever two sublinks are both connected to the same CPUs.
+ * Normally, unganged sublinks between the same two CPUs are reganged.
+ * Return true from this routine to leave the links unganged.
+ *
+ * Parameters:
+ * @param[in] u8 nodeA = One node on which this link is located
+ * @param[in] u8 linkA = The link on this node
+ * @param[in] u8 nodeB = The other node on which this link is located
+ * @param[in] u8 linkB = The link on that node
+ * @param[out] BOOL result = true to leave link unganged
+ * false to regang link automatically
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_SkipRegang)(
+ u8 NodeA,
+ u8 LinkA,
+ u8 NodeB,
+ u8 LinkB
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_CustomizeTrafficDistribution()
+ *
+ * Description:
+ * Near the end of HT initialization, this routine is called once.
+ * If this routine will handle traffic distribution in a proprietary way,
+ * after detecting which links to distribute traffic on and configuring the system,
+ * return true. Return false to let the HT code detect and do traffic distribution.
+ * This routine can also be used to simply turn this feature off, or to pre-process
+ * the system before normal traffic distribution.
+ *
+ * Parameters:
+ * @param[out] BOOL result = true skip traffic distribution
+ * false do normal traffic distribution
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_CustomizeTrafficDistribution)();
+
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * AMD_CB_CustomizeBuffers(u8 Node)
+ *
+ * Description:
+ * Near the end of HT initialization, this routine is called once per CPU node.
+ * Implement proprietary buffer tuning and return true, or return false for normal tuning.
+ * This routine can also be used to simply turn this feature off, or to pre-process
+ * the system before normal tuning.
+ *
+ * Parameters:
+ * @param[in] u8 node = buffer allocation may apply to this node
+ * @param[out] BOOL result = true skip buffer allocation on this node
+ * false tune buffers normally
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ BOOL (*AMD_CB_CustomizeBuffers)( u8 node );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_OverrideDevicePort(u8 HostNode, u8 HostLink, u8 Depth, u8 *LinkWidthIn,
+ * u8 *LinkWidthOut, u8 *LinkFrequency)
+ *
+ * Description:
+ * Called once for each active link on each IO device.
+ * Provides an opportunity to directly control the frequency and width,
+ * intended for test and debug. The input frequency and width will be used
+ * if not overridden.
+ *
+ * Parameters:
+ * @param[in] u8 hostNode = The node on which this link is located
+ * @param[in] u8 hostLink = The link about to be initialized
+ * @param[in] u8 Depth = The depth in the I/O chain from the Host
+ * @param[in] u8 Link = the link on the device (0 or 1)
+ * @param[in,out] u8* LinkWidthIn = modify to change the Link Width In
+ * @param[in,out] u8* LinkWidthOut = modify to change the Link Width Out
+ * @param[in,out] u8* LinkFrequency = modify to change the link's frequency
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_OverrideDevicePort)(
+ u8 HostNode,
+ u8 HostLink,
+ u8 Depth,
+ u8 Link,
+ u8 *LinkWidthIn,
+ u8 *LinkWidthOut,
+ u8 *LinkFrequency
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_OverrideCpuPort(u8 Node, u8 Link, u8 *LinkWidthIn, u8 *LinkWidthOut,
+ * u8 *LinkFrequency)
+ *
+ * Description:
+ * Called once for each active link on each CPU.
+ * Provides an opportunity to directly control the frequency and width,
+ * intended for test and debug. The input frequency and width will be used
+ * if not overridden.
+ *
+ * Parameters:
+ * @param[in] u8 node = One node on which this link is located
+ * @param[in] u8 link = The link on this node
+ * @param[in,out] u8* LinkWidthIn = modify to change the Link Width In
+ * @param[in,out] u8* LinkWidthOut = modify to change the Link Width Out
+ * @param[in,out] u8* LinkFrequency = modify to change the link's frequency
+ *
+ *---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_OverrideCpuPort)(
+ u8 Node,
+ u8 Link,
+ u8 *LinkWidthIn,
+ u8 *LinkWidthOut,
+ u8 *LinkFrequency
+ );
+
+ /**----------------------------------------------------------------------------------------
+ *
+ * void
+ * AMD_CB_EventNotify(u8 evtClass, u16 event, const u8 *pEventData0)
+ *
+ * Description:
+ * Errors, events, faults, warnings, and useful information are provided by
+ * calling this routine as often as necessary, once for each notification.
+ * See elsewhere in this file for class, event, and event data definitions.
+ * See the documentation for more details.
+ *
+ * Parameters:
+ * @param[in] u8 evtClass = What level event is this
+ * @param[in] u16 event = A unique ID of this event
+ * @param[in] u8* pEventData0 = useful data associated with the event.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+ void (*AMD_CB_EventNotify) (
+ u8 evtClass,
+ u16 event,
+ const u8 *pEventData0
+ );
+
+} AMD_HTBLOCK;
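As an editorial sketch (not part of the patch): a board port that needs to keep a particular pair of sublinks unganged could supply AMD_CB_SkipRegang along these lines; the node and link numbers are purely hypothetical.

	/* Hypothetical board rule: keep node 0 link 0 <-> node 1 link 0 unganged. */
	static BOOL board_skip_regang(u8 NodeA, u8 LinkA, u8 NodeB, u8 LinkB)
	{
		if ((NodeA == 0) && (LinkA == 0) && (NodeB == 1) && (LinkB == 0))
			return 1;	/* leave this sublink pair unganged */
		return 0;		/* regang everything else automatically */
	}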
+
+/*
+ * Event Notification Structures
+ * These structures are passed to AMD_CB_EventNotify as *pEventData0.
+ */
+
+/* For event HT_EVENT_HW_SYNCHFLOOD */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+} sHtEventHWSynchFlood;
+
+/* For event HT_EVENT_HW_HTCRC */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 laneMask;
+} sHtEventHWHtCrc;
+
+/* For event HT_EVENT_NCOH_BUS_MAX_EXCEED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 bus;
+} sHTEventNcohBusMaxExceed;
+
+/* For event HT_EVENT_NCOH_LINK_EXCEED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 depth;
+ u8 maxLinks;
+} sHtEventNcohLinkExceed;
+
+/* For event HT_EVENT_NCOH_CFG_MAP_EXCEED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+} sHtEventNcohCfgMapExceed;
+
+/* For event HT_EVENT_NCOH_BUID_EXCEED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 depth;
+ u8 currentBUID;
+ u8 unitCount;
+} sHtEventNcohBuidExceed;
+
+/* For event HT_EVENT_NCOH_DEVICE_FAILED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 depth;
+ u8 attemptedBUID;
+} sHtEventNcohDeviceFailed;
+
+/* For event HT_EVENT_NCOH_AUTO_DEPTH */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 depth;
+} sHtEventNcohAutoDepth;
+
+/* For event HT_EVENT_OPT_REQUIRED_CAP_RETRY,
+ * HT_EVENT_OPT_REQUIRED_CAP_GEN3
+ */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 depth;
+} sHtEventOptRequiredCap;
+
+/* For event HT_EVENT_COH_NO_TOPOLOGY */
+typedef struct
+{
+ u8 eSize;
+ u8 totalNodes;
+} sHtEventCohNoTopology;
+
+/* For event HT_EVENT_COH_LINK_EXCEED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 targetNode;
+ u8 totalNodes;
+ u8 maxLinks;
+} sHtEventCohLinkExceed;
+
+/* For event HT_EVENT_COH_FAMILY_FEUD */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 totalNodes;
+} sHtEventCohFamilyFeud;
+
+/* For event HT_EVENT_COH_NODE_DISCOVERED */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 newNode;
+} sHtEventCohNodeDiscovered;
+
+/* For event HT_EVENT_COH_MPCAP_MISMATCH */
+typedef struct
+{
+ u8 eSize;
+ u8 node;
+ u8 link;
+ u8 sysMpCap;
+ u8 totalNodes;
+} sHtEventCohMpCapMismatch;
+
+/*----------------------------------------------------------------------------
+ * FUNCTIONS PROTOTYPE
+ *
+ *----------------------------------------------------------------------------
+ */
+void amdHtInitialize(AMD_HTBLOCK *pBlock);
+
+
+#endif /* H3FINIT_H */
+
+
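A minimal usage sketch for the interface above (editorial; the wrapper code added elsewhere in this patch may fill in more of the block): optional callbacks are simply left NULL.

	static void board_ht_init(void)
	{
		AMD_HTBLOCK ht;

		memset(&ht, 0, sizeof(ht));	/* NULL out every optional callback */
		ht.AMD_CB_EventNotify = board_event_notify;	/* sketches from above */
		ht.AMD_CB_SkipRegang = board_skip_regang;
		amdHtInitialize(&ht);
	}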
diff --git a/src/northbridge/amd/amdht/h3gtopo.h b/src/northbridge/amd/amdht/h3gtopo.h
new file mode 100644
index 0000000000..a724618763
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3gtopo.h
@@ -0,0 +1,358 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef HTTOPO_H
+#define HTTOPO_H
+
+/*----------------------------------------------------------------------------
+ * Mixed (DEFINITIONS AND MACROS / TYPEDEFS, STRUCTURES, ENUMS)
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*-----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS, STRUCTURES, ENUMS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*
+ * 0
+ */
+static u8 const amdHtTopologySingleNode[] = {
+ 0x01,
+ 0x00, 0xFF // Node 0
+};
+
+/*
+ * 0---1
+ */
+static u8 const amdHtTopologyDualNode[] = {
+ 0x02,
+ 0x02, 0xFF, 0x00, 0x11, // Node 0
+ 0x00, 0x00, 0x01, 0xFF // Node 1
+};
+
+/*
+ * 2
+ * |
+ * |
+ * 0---1
+ */
+static u8 const amdHtTopologyThreeLine[] = {
+ 0x03,
+ 0x06, 0xFF, 0x04, 0x11, 0x02, 0x22, // Node 0
+ 0x00, 0x00, 0x01, 0xFF, 0x00, 0x00, // Node 1
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF // Node 2
+};
+
+/*
+ * 2
+ * |\
+ * | \
+ * 0---1
+ */
+static u8 const amdHtTopologyTriangle[] = {
+ 0x03,
+ 0x06, 0xFF, 0x00, 0x11, 0x00, 0x22, // Node 0
+ 0x00, 0x00, 0x05, 0xFF, 0x00, 0x22, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x03, 0xFF // Node 2
+};
+
+/*
+ * 2 3
+ * |\ |
+ * | \|
+ * 0---1
+ */
+static u8 const amdHtTopologyFourDegenerate[] = {
+ 0x04,
+ 0x06, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x11, // Node 0
+ 0x08, 0x00, 0x0D, 0xFF, 0x08, 0x22, 0x05, 0x33, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x03, 0xFF, 0x00, 0x11, // Node 2
+ 0x00, 0x11, 0x00, 0x11, 0x00, 0x11, 0x02, 0xFF // Node 3
+};
+
+/*
+ * 2---3
+ * |\ /|
+ * |/ \|
+ * 0---1
+ */
+static u8 const amdHtTopologyFourFully[] = {
+ 0x04,
+ 0x0E, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, // Node 0
+ 0x00, 0x00, 0x0D, 0xFF, 0x00, 0x22, 0x00, 0x33, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x0B, 0xFF, 0x00, 0x33, // Node 2
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x07, 0xFF // Node 3
+};
+
+
+/*
+ * 2---3
+ * |\ |
+ * | \|
+ * 0---1
+ */
+static u8 const amdHtTopologyFourKite[] = {
+ 0x04,
+ 0x06, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x11, // Node 0
+ 0x08, 0x00, 0x0D, 0xFF, 0x00, 0x22, 0x00, 0x33, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x0B, 0xFF, 0x01, 0x33, // Node 2
+ 0x00, 0x22, 0x00, 0x11, 0x00, 0x22, 0x06, 0xFF // Node 3
+};
+
+
+/*
+ * 2 3
+ * | |
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologyFourLine[] = {
+ 0x04,
+ 0x06, 0xFF, 0x04, 0x11, 0x02, 0x22, 0x04, 0x11, // Node 0
+ 0x08, 0x00, 0x09, 0xFF, 0x08, 0x00, 0x01, 0x33, // Node 1
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0x00, 0x00, // Node 2
+ 0x00, 0x11, 0x00, 0x11, 0x00, 0x11, 0x02, 0xFF // Node 3
+};
+
+
+/*
+ * 2---3
+ * | |
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologyFourSquare[] = {
+ 0x04,
+ 0x06, 0xFF, 0x00, 0x11, 0x02, 0x22, 0x00, 0x22, // Node 0
+ 0x00, 0x00, 0x09, 0xFF, 0x00, 0x33, 0x01, 0x33, // Node 1
+ 0x08, 0x00, 0x00, 0x00, 0x09, 0xFF, 0x00, 0x33, // Node 2
+ 0x00, 0x11, 0x04, 0x11, 0x00, 0x22, 0x06, 0xFF, // Node 3
+};
+
+
+/*
+ * 2---3
+ * |\
+ * | \
+ * 0 1
+ */
+static u8 const amdHtTopologyFourStar[] = {
+ 0x04,
+ 0x04, 0xFF, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, // Node 0
+ 0x00, 0x22, 0x04, 0xFF, 0x00, 0x22, 0x00, 0x22, // Node 1
+ 0x0A, 0x00, 0x09, 0x11, 0x0B, 0xFF, 0x03, 0x33, // Node 2
+ 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, 0x04, 0xFF, // Node 3
+};
+
+
+static u8 const amdHtTopologyFiveFully[] = {
+ 0x05,
+ 0x1E, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, // Node 0
+ 0x00, 0x00, 0x1D, 0xFF, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x1B, 0xFF, 0x00, 0x33, 0x00, 0x44, // Node 2
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x17, 0xFF, 0x00, 0x44, // Node 3
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x0F, 0xFF // Node 4
+};
+
+
+/*
+ *
+ * 4
+ * |\
+ * | \
+ * 2 3
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologyFiveTwistedLadder[] = {
+ 0x05,
+ 0x06, 0xFF, 0x04, 0x11, 0x02, 0x22, 0x00, 0x11, 0x00, 0x22, // Node0
+ 0x08, 0x00, 0x09, 0xFF, 0x08, 0x00, 0x01, 0x33, 0x00, 0x30, // Node1
+ 0x10, 0x00, 0x10, 0x00, 0x11, 0xFF, 0x00, 0x40, 0x01, 0x44, // Node2
+ 0x00, 0x11, 0x00, 0x11, 0x00, 0x14, 0x12, 0xFF, 0x02, 0x44, // Node3
+ 0x00, 0x22, 0x00, 0x23, 0x00, 0x22, 0x04, 0x33, 0x0C, 0xFF // Node4
+};
+
+
+static u8 const amdHtTopologySixFully[] = {
+ 0x06,
+ 0x3E, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, // Node 0
+ 0x00, 0x00, 0x3D, 0xFF, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x3B, 0xFF, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, // Node 2
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x37, 0xFF, 0x00, 0x44, 0x00, 0x55, // Node 3
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x2F, 0xFF, 0x00, 0x55, // Node 4
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x1F, 0xFF // Node 5
+};
+
+/*
+ *
+ * 4 5
+ * |\ /|
+ * |/ \|
+ * 2 3
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologySixTwistedLadder[] = {
+ 0x06,
+ 0x06, 0xFF, 0x04, 0x11, 0x02, 0x22, 0x00, 0x11, 0x02, 0x22, 0x00, 0x12, // Node0
+ 0x08, 0x00, 0x09, 0xFF, 0x00, 0x00, 0x01, 0x33, 0x00, 0x03, 0x01, 0x33, // Node1
+ 0x30, 0x00, 0x00, 0x00, 0x31, 0xFF, 0x00, 0x54, 0x21, 0x44, 0x00, 0x55, // Node2
+ 0x00, 0x11, 0x30, 0x11, 0x00, 0x45, 0x32, 0xFF, 0x00, 0x44, 0x12, 0x55, // Node3
+ 0x00, 0x22, 0x00, 0x32, 0x08, 0x22, 0x00, 0x33, 0x0C, 0xFF, 0x00, 0x32, // Node4
+ 0x00, 0x23, 0x00, 0x33, 0x00, 0x22, 0x04, 0x33, 0x00, 0x23, 0x0C, 0xFF // Node5
+};
+
+
+static u8 const amdHtTopologySevenFully[] = {
+ 0x07,
+ 0x7E, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, // Node 0
+ 0x00, 0x00, 0x7D, 0xFF, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0x7B, 0xFF, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, // Node 2
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x77, 0xFF, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, // Node 3
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x6F, 0xFF, 0x00, 0x55, 0x00, 0x66, // Node 4
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x5F, 0xFF, 0x00, 0x66, // Node 5
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x3F, 0xFF, // Node 6
+};
+
+
+/* 6
+ * |
+ * 4 5
+ * |\ /|
+ * |/ \|
+ * 2 3
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologySevenTwistedLadder[] = {
+ 0x07,
+ 0x06, 0xFF, 0x00, 0x11, 0x02, 0x22, 0x00, 0x12, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, // Node0
+ 0x00, 0x00, 0x09, 0xFF, 0x00, 0x03, 0x01, 0x33, 0x00, 0x33, 0x00, 0x33, 0x00, 0x33, // Node1
+ 0x30, 0x00, 0x00, 0x50, 0x31, 0xFF, 0x00, 0x54, 0x21, 0x44, 0x01, 0x55, 0x21, 0x44, // Node2
+ 0x00, 0x41, 0x30, 0x11, 0x00, 0x45, 0x32, 0xFF, 0x02, 0x44, 0x12, 0x55, 0x02, 0x44, // Node3
+ 0x48, 0x22, 0x40, 0x33, 0x48, 0x22, 0x40, 0x33, 0x4C, 0xFF, 0x40, 0x32, 0x0C, 0x66, // Node4
+ 0x00, 0x22, 0x04, 0x33, 0x00, 0x22, 0x04, 0x33, 0x00, 0x23, 0x0C, 0xFF, 0x00, 0x23, // Node5
+ 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x10, 0xFF // Node6
+};
+
+
+/*
+ * 5--4
+ * /####\
+ * 6######3
+ * |######|
+ * 7######2
+ * \####/
+ * 0--1
+ */
+static u8 const amdHtTopologyEightFully [] = {
+ 0x08,
+ 0xFE, 0xFF, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, 0x00, 0x77, // Node 0
+ 0x00, 0x00, 0xFD, 0xFF, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, 0x00, 0x77, // Node 1
+ 0x00, 0x00, 0x00, 0x11, 0xFB, 0xFF, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, 0x00, 0x77, // Node 2
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0xF7, 0xFF, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, 0x00, 0x77, // Node 3
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0xEF, 0xFF, 0x00, 0x55, 0x00, 0x66, 0x00, 0x77, // Node 4
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0xDF, 0xFF, 0x00, 0x66, 0x00, 0x77, // Node 5
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0xBF, 0xFF, 0x00, 0x77, // Node 6
+ 0x00, 0x00, 0x00, 0x11, 0x00, 0x22, 0x00, 0x33, 0x00, 0x44, 0x00, 0x55, 0x00, 0x66, 0x7F, 0xFF // Node 7
+};
+
+
+/* 6---7
+ * | |
+ * 4---5
+ * | |
+ * 2---3
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologyEightStraightLadder[] = {
+ 0x08,
+ 0x06, 0xFF, 0x00, 0x11, 0x02, 0x22, 0x00, 0x22, 0x02, 0x22, 0x00, 0x22, 0x02, 0x22, 0x00, 0x22, // Node0
+ 0x00, 0x00, 0x09, 0xFF, 0x00, 0x33, 0x01, 0x33, 0x00, 0x33, 0x01, 0x33, 0x00, 0x33, 0x01, 0x33, // Node1
+ 0x18, 0x00, 0x00, 0x00, 0x19, 0xFF, 0x00, 0x33, 0x09, 0x44, 0x00, 0x44, 0x09, 0x44, 0x00, 0x44, // Node2
+ 0x00, 0x11, 0x24, 0x11, 0x00, 0x22, 0x26, 0xFF, 0x00, 0x55, 0x06, 0x55, 0x00, 0x55, 0x06, 0x55, // Node3
+ 0x60, 0x22, 0x00, 0x22, 0x60, 0x22, 0x00, 0x22, 0x64, 0xFF, 0x00, 0x55, 0x24, 0x66, 0x00, 0x66, // Node4
+ 0x00, 0x33, 0x90, 0x33, 0x00, 0x33, 0x90, 0x33, 0x00, 0x44, 0x98, 0xFF, 0x00, 0x77, 0x18, 0x77, // Node5
+ 0x80, 0x44, 0x00, 0x44, 0x80, 0x44, 0x00, 0x44, 0x80, 0x44, 0x00, 0x44, 0x90, 0xFF, 0x00, 0x77, // Node6
+ 0x00, 0x55, 0x40, 0x55, 0x00, 0x55, 0x40, 0x55, 0x00, 0x55, 0x40, 0x55, 0x00, 0x66, 0x60, 0xFF // Node7
+};
+
+
+/* 6---7
+ * | |
+ * 4 5
+ * |\ /|
+ * |/ \|
+ * 2 3
+ * | |
+ * 0---1
+ */
+static u8 const amdHtTopologyEightTwistedLadder[] = {
+ 0x08,
+ 0x06, 0xFF, 0x00, 0x11, 0x02, 0x22, 0x00, 0x12, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, // Node0
+ 0x00, 0x00, 0x09, 0xFF, 0x00, 0x03, 0x01, 0x33, 0x00, 0x33, 0x00, 0x33, 0x00, 0x33, 0x00, 0x33, // Node1
+ 0x30, 0x00, 0x00, 0x50, 0x31, 0xFF, 0x00, 0x54, 0x21, 0x44, 0x01, 0x55, 0x21, 0x44, 0x01, 0x55, // Node2
+ 0x00, 0x41, 0x30, 0x11, 0x00, 0x45, 0x32, 0xFF, 0x02, 0x44, 0x12, 0x55, 0x02, 0x44, 0x12, 0x55, // Node3
+ 0x48, 0x22, 0x40, 0x33, 0x48, 0x22, 0x40, 0x33, 0x4C, 0xFF, 0x00, 0x32, 0x0C, 0x66, 0x00, 0x36, // Node4
+ 0x80, 0x22, 0x84, 0x33, 0x80, 0x22, 0x84, 0x33, 0x00, 0x23, 0x8C, 0xFF, 0x00, 0x27, 0x0C, 0x77, // Node5
+ 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x00, 0x44, 0x80, 0x44, 0x00, 0x74, 0x90, 0xFF, 0x00, 0x77, // Node6
+ 0x00, 0x55, 0x00, 0x55, 0x00, 0x55, 0x00, 0x55, 0x00, 0x65, 0x40, 0x55, 0x00, 0x66, 0x60, 0xFF // Node7
+};
+
+static const u8 * const amd_topo_list[] = {
+ amdHtTopologySingleNode,
+ amdHtTopologyDualNode,
+ amdHtTopologyThreeLine,
+ amdHtTopologyTriangle,
+ amdHtTopologyFourLine,
+ amdHtTopologyFourStar,
+ amdHtTopologyFourDegenerate,
+ amdHtTopologyFourSquare,
+ amdHtTopologyFourKite,
+ amdHtTopologyFourFully,
+ amdHtTopologyFiveFully,
+ amdHtTopologySixFully,
+ amdHtTopologySevenFully,
+ amdHtTopologyEightFully,
+ amdHtTopologyEightTwistedLadder,
+ amdHtTopologyEightStraightLadder,
+ NULL // NULL to mark end of list
+};
+
+/*----------------------------------------------------------------------------
+ * FUNCTIONS PROTOTYPE
+ *
+ *----------------------------------------------------------------------------
+ */
+void getAmdTopolist(u8 ***p);
+
+
+#endif /* HTTOPO_H */
+
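Editorial sketch of walking this table through getAmdTopolist(): byte 0 of each entry is its node count and a NULL pointer terminates the list. This only illustrates the table layout; the coherent-discovery code must additionally distinguish same-size topologies by the discovered link graph.

	static const u8 *find_topology_by_size(u8 nodes)
	{
		u8 **topolist;
		u8 i;

		getAmdTopolist(&topolist);
		for (i = 0; topolist[i] != NULL; i++) {
			if (topolist[i][0] == nodes)
				return topolist[i];	/* first entry of that size */
		}
		return NULL;
	}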
diff --git a/src/northbridge/amd/amdht/h3ncmn.c b/src/northbridge/amd/amdht/h3ncmn.c
new file mode 100644
index 0000000000..f03139f914
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3ncmn.c
@@ -0,0 +1,2214 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*----------------------------------------------------------------------------
+ * MODULES USED
+ *
+ *----------------------------------------------------------------------------
+ */
+
+#undef FILECODE
+#define FILECODE 0xF002
+#include "h3finit.h"
+#include "h3ffeat.h"
+#include "h3ncmn.h"
+#include "AsPsNb.h"
+
+
+/*----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/* CPU Northbridge Functions */
+#define CPU_HTNB_FUNC_00 0
+#define CPU_HTNB_FUNC_04 4
+#define CPU_ADDR_FUNC_01 1
+#define CPU_NB_FUNC_03 3
+
+/* Function 0 registers */
+#define REG_ROUTE0_0X40 0x40
+#define REG_ROUTE1_0X44 0x44
+#define REG_NODE_ID_0X60 0x60
+#define REG_UNIT_ID_0X64 0x64
+#define REG_LINK_TRANS_CONTROL_0X68 0x68
+#define REG_LINK_INIT_CONTROL_0X6C 0x6C
+#define REG_HT_CAP_BASE_0X80 0x80
+#define REG_HT_LINK_RETRY0_0X130 0x130
+#define REG_HT_TRAFFIC_DIST_0X164 0x164
+#define REG_HT_LINK_EXT_CONTROL0_0X170 0x170
+
+#define HT_CONTROL_CLEAR_CRC (~(3 << 8))
+
+/* Function 1 registers */
+#define REG_ADDR_CONFIG_MAP0_1XE0 0xE0
+#define CPU_ADDR_NUM_CONFIG_MAPS 4
+
+/* Function 3 registers */
+#define REG_NB_SRI_XBAR_BUF_3X70 0x70
+#define REG_NB_MCT_XBAR_BUF_3X78 0x78
+#define REG_NB_FIFOPTR_3XDC 0xDC
+#define REG_NB_CAPABILITY_3XE8 0xE8
+#define REG_NB_CPUID_3XFC 0xFC
+#define REG_NB_LINK_XCS_TOKEN0_3X148 0x148
+#define REG_NB_DOWNCORE_3X190 0x190
+
+/* Function 4 registers */
+
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS AND STRUCTURES
+ *
+ *----------------------------------------------------------------------------
+ */
+/*----------------------------------------------------------------------------
+ * PROTOTYPES OF LOCAL FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/***************************************************************************
+ *** FAMILY/NORTHBRIDGE SPECIFIC FUNCTIONS ***
+ ***************************************************************************/
+
+/**----------------------------------------------------------------------------------------
+ *
+ * SBDFO
+ * makeLinkBase(u8 currentNode, u8 currentLink)
+ *
+ * Description:
+ * Private to northbridge implementation. Return the HT Host capability base
+ * PCI config address for a link.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node this link is on
+ * @param[in] u8 link = the link
+ * @param[out] SBDFO result = the pci config address
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+static SBDFO makeLinkBase(u8 node, u8 link)
+{
+ SBDFO linkBase;
+
+ /* With rev F, this cannot be called with a 4th link or with sublinks */
+ if (link < 4)
+ linkBase = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_HT_CAP_BASE_0X80 + link*HT_HOST_CAP_SIZE);
+ else
+ linkBase = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_04,
+ REG_HT_CAP_BASE_0X80 + (link-4)*HT_HOST_CAP_SIZE);
+ return linkBase;
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * setHtControlRegisterBits(SBDFO reg, u8 hiBit, u8 loBit, u32 *pValue)
+ *
+ * Description:
+ * Private to northbridge implementation. Provide a common routine for accessing the
+ * HT Link Control registers (84, a4, c4, e4), to enforce not clearing the
+ * HT CRC error bits. Replaces direct use of AmdPCIWriteBits().
+ * NOTE: This routine is called for IO Devices as well as CPUs!
+ *
+ * Parameters:
+ * @param[in] SBDFO reg = the PCI config address the control register
+ * @param[in] u8 hiBit = the high bit number
+ * @param[in] u8 loBit = the low bit number
+ * @param[in] u32* pValue = the value to write to that bit range. Bit 0 => loBit.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+static void setHtControlRegisterBits(SBDFO reg, u8 hiBit, u8 loBit, u32 *pValue)
+{
+ u32 temp, mask;
+
+ ASSERT((hiBit < 32) && (loBit < 32) && (hiBit >= loBit) && ((reg & 0x3) == 0));
+ ASSERT((hiBit < 8) || (loBit > 9));
+
+ /* A 1<<32 == 1<<0 due to x86 SHL instruction, so skip if that is the case */
+ if ((hiBit-loBit) != 31)
+ mask = (((u32)1 << (hiBit-loBit+1))-1);
+ else
+ mask = (u32)0xFFFFFFFF;
+
+ AmdPCIRead(reg, &temp);
+ temp &= ~(mask << loBit);
+ temp |= (*pValue & mask) << loBit;
+ temp &= (u32)HT_CONTROL_CLEAR_CRC;
+ AmdPCIWrite(reg, &temp);
+}
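Worked example of the mask arithmetic above (editorial note), for the single-bit LinkFail field that readTrueLinkFailStatus() passes in later (hiBit = loBit = 4):

	mask = (1 << (4 - 4 + 1)) - 1;	/* = 0x1, a one-bit field          */
	temp &= ~(0x1 << 4);		/* clear the old LinkFail value    */
	temp |= (*pValue & 0x1) << 4;	/* insert the new LinkFail value   */
	temp &= ~(3 << 8);		/* force 0s into CRC bits 9:8 so   */
					/* the write never clears them     */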
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * writeRoutingTable(u8 node, u8 target, u8 Link, cNorthBridge *nb)
+ *
+ * Description:
+ * This routine will modify the routing tables on the
+ * SourceNode to cause it to route both request and response traffic to the
+ * targetNode through the specified Link.
+ *
+ * NOTE: This routine is to be used for early discovery and initialization. The
+ * final routing tables must be loaded some other way because this
+ * routine does not address the issue of probes, or independent request
+ * response paths.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will have its routing tables modified.
+ * @param[in] u8 target = For routing to node target
+ * @param[in] u8 Link = Link from node to target
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+void writeRoutingTable(u8 node, u8 target, u8 link, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 temp = (nb->selfRouteResponseMask | nb->selfRouteRequestMask) << (link + 1);
+ ASSERT((node < nb->maxNodes) && (target < nb->maxNodes) && (link < nb->maxLinks));
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE0_0X40 + target*4),
+ &temp);
+#else
+ STOP_HERE;
+#endif
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * writeNodeID(u8 node, u8 nodeID, cNorthBridge *nb)
+ *
+ * Description:
+ * Modifies the NodeID register on the target node
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will have its NodeID altered.
+ * @param[in] u8 nodeID = the new value for NodeID
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+void writeNodeID(u8 node, u8 nodeID, cNorthBridge *nb)
+{
+ u32 temp = nodeID;
+ ASSERT((node < nb->maxNodes) && (nodeID < nb->maxNodes));
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_NODE_ID_0X60),
+ 2, 0, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * readDefLnk(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Read the DefLnk (the source link of the current packet)
+ * from node
+ *
+ * Parameters:
+ * @param[in] u8 node = the node whose default link is to be read
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = The HyperTransport link where the request to
+ * read the default link came from. Since this
+ * code is running on the BSP, this should be the link
+ * pointing back towards the BSP.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+u8 readDefLnk(u8 node, cNorthBridge *nb)
+{
+ u32 deflink = 0;
+ SBDFO licr;
+ u32 temp;
+
+ licr = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_LINK_INIT_CONTROL_0X6C);
+
+ ASSERT((node < nb->maxNodes));
+ AmdPCIReadBits(licr, 3, 2, &deflink);
+ AmdPCIReadBits(licr, 8, 8, &temp); /* on rev F, this bit is reserved == 0 */
+ deflink |= temp << 2;
+ return (u8)deflink;
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * enableRoutingTables(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Turns routing tables on for a given node
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will have its routing tables enabled
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+void enableRoutingTables(u8 node, cNorthBridge *nb)
+{
+ u32 temp = 0;
+ ASSERT((node < nb->maxNodes));
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_LINK_INIT_CONTROL_0X6C),
+ 0, 0, &temp);
+}
+
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * verifyLinkIsCoherent(u8 node, u8 Link, cNorthBridge *nb)
+ *
+ * Description:
+ * Verify that the link is coherent, connected, and ready
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 link = the link on that Node to examine
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = true - The link has the following status
+ * linkCon=1, Link is connected
+ * InitComplete=1, Link initialization is complete
+ * NC=0, Link is coherent
+ * UniP-cLDT=0, Link is not Uniprocessor cLDT
+ * LinkConPend=0 Link connection is not pending
+ * false- The link has some other status
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+
+BOOL verifyLinkIsCoherent(u8 node, u8 link, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+
+ u32 linkType;
+ SBDFO linkBase;
+
+ ASSERT((node < nb->maxNodes) && (link < nb->maxLinks));
+
+ linkBase = makeLinkBase(node, link);
+
+ // FN0_98/A4/C4 = LDT Type Register
+ AmdPCIRead(linkBase + HTHOST_LINK_TYPE_REG, &linkType);
+
+ // Verify LinkCon=1, InitComplete=1, NC=0, UniP-cLDT=0, LinkConPend=0
+ return (linkType & HTHOST_TYPE_MASK) == HTHOST_TYPE_COHERENT;
+#else
+ return 0;
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * bool
+ * readTrueLinkFailStatus(u8 node, u8 link, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Return the LinkFailed status AFTER an attempt is made to clear the bit.
+ * Also, call event notify if a Hardware Fault caused a synch flood on a previous boot.
+ *
+ * The table below summarizes correct responses of this routine.
+ * Family before after unconnected Notify? return
+ * 0F 0 0 0 No 0
+ * 0F 1 0 0 Yes 0
+ * 0F 1 1 X No 1
+ * 10 0 0 0 No 0
+ * 10 1 0 0 Yes 0
+ * 10 1 0 3 No 1
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 link = the link on that node to examine
+ * @param[in] sMainData *pDat = access to the callback routines
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = true - the link is not connected or has hard error
+ * false- if the link is connected
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL readTrueLinkFailStatus(u8 node, u8 link, sMainData *pDat, cNorthBridge *nb)
+{
+ u32 before, after, unconnected, crc;
+ SBDFO linkBase;
+
+ ASSERT((node < nb->maxNodes) && (link < nb->maxLinks));
+
+ linkBase = makeLinkBase(node, link);
+
+ /* Save the CRC status before doing anything else.
+ * Read, Clear, then Re-read the error bits in the Link Control Register
+ * FN0_84/A4/C4[4] = LinkFail bit
+ * and the connection status, TransOff and EndOfChain
+ */
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 9, 8, &crc);
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 4, 4, &before);
+ setHtControlRegisterBits(linkBase + HTHOST_LINK_CONTROL_REG, 4, 4, &before);
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 4, 4, &after);
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 7, 6, &unconnected);
+
+ if (before != after)
+ {
+ if (!unconnected)
+ {
+ if (crc != 0)
+ {
+ /* A synch flood occurred due to HT CRC */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ /* Pass the node and link on which the generic synch flood event occurred. */
+ sHtEventHWHtCrc evt = {sizeof(sHtEventHWHtCrc), node, link, (u8)crc};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_HW_FAULT,
+ HT_EVENT_HW_HTCRC,
+ (u8 *)&evt);
+ }
+ }
+ else
+ {
+ /* Some synch flood occurred */
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ /* Pass the node and link on which the generic synch flood event occurred. */
+ sHtEventHWSynchFlood evt = {sizeof(sHtEventHWSynchFlood), node, link};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_HW_FAULT,
+ HT_EVENT_HW_SYNCHFLOOD,
+ (u8 *)&evt);
+ }
+ }
+ }
+ }
+ return ((after != 0) || unconnected);
+}
+
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * readToken(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Read the token stored in the scratchpad register
+ * NOTE: The location used to store the token is arbitrary. The only
+ * requirement is that the location warm resets to zero, and that
+ * using it will have no ill-effects during HyperTransport initialization.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = the Token read from the node
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 readToken(u8 node, cNorthBridge *nb)
+{
+ u32 temp;
+
+ ASSERT((node < nb->maxNodes));
+ /* Use CpuCnt as a scratch register */
+ /* Limiting use to 4 bits keeps this code compatible with both rev F and GH. */
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_NODE_ID_0X60),
+ 19, 16, &temp);
+
+ return (u8)temp;
+}
+
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * writeToken(u8 node, u8 Value, cNorthBridge *nb)
+ *
+ * Description:
+ * Write the token stored in the scratchpad register
+ * NOTE: The location used to store the token is arbitrary. The only
+ * requirement is that the location warm resets to zero, and that
+ * using it will have no ill-effects during HyperTransport initialization.
+ * Limiting use to 4 bits keeps this code compatible with both rev F and GH.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 value = the token value to write (only bits 3:0 are used)
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void writeToken(u8 node, u8 value, cNorthBridge *nb)
+{
+ u32 temp = value;
+ ASSERT((node < nb->maxNodes));
+ /* Use CpuCnt as a scratch register */
+ /* Limiting use to 4 bits keeps this code compatible with both rev F and GH. */
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_NODE_ID_0X60),
+ 19, 16, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * fam0FGetNumCoresOnNode(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Return the number of cores (1 based count) on node.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = the number of cores
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 fam0FGetNumCoresOnNode(u8 node, cNorthBridge *nb)
+{
+ u32 temp;
+
+ ASSERT((node < nb->maxNodes));
+ /* Read CmpCap */
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CAPABILITY_3XE8),
+ 13, 12, &temp);
+
+ /* and add one */
+ return (u8)(temp+1);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * fam10GetNumCoresOnNode(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Return the number of cores (1 based count) on node.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 result = the number of cores
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 fam10GetNumCoresOnNode(u8 node, cNorthBridge *nb)
+{
+ u32 temp, leveling, cores;
+ u8 i;
+
+ ASSERT((node < nb->maxNodes));
+ /* Read CmpCap */
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CAPABILITY_3XE8),
+ 13, 12, &temp);
+
+ /* Support Downcoring */
+ cores = temp + 1;
+ AmdPCIReadBits (MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_DOWNCORE_3X190),
+ 3, 0, &leveling);
+ for (i=0; i<cores; i++)
+ {
+ if (leveling & ((u32) 1 << i))
+ {
+ temp--;
+ }
+ }
+ return (u8)(temp+1);
+}
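Worked example of the downcoring adjustment above (editorial note):

	/* CmpCap = 3 gives cores = 4; if F3x190 reports leveling = 0x3
	 * (two cores disabled), temp drops 3 -> 2 -> 1 and the routine
	 * returns temp + 1 = 2 cores.
	 */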
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * setTotalNodesAndCores(u8 node, u8 totalNodes, u8 totalCores, cNorthBridge *nb)
+ *
+ * Description:
+ * Write the total number of cores and nodes to the node
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 totalNodes = the total number of nodes
+ * @param[in] u8 totalCores = the total number of cores
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void setTotalNodesAndCores(u8 node, u8 totalNodes, u8 totalCores, cNorthBridge *nb)
+{
+ SBDFO nodeIDReg;
+ u32 temp;
+
+ ASSERT((node < nb->maxNodes) && (totalNodes <= nb->maxNodes));
+ nodeIDReg = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_NODE_ID_0X60);
+
+ temp = totalCores-1;
+ /* Rely on max number of nodes:cores for rev F and GH to make
+ * this code work, even though we write reserved bit 20 on rev F it will be
+ * zero in that case.
+ */
+ AmdPCIWriteBits(nodeIDReg, 20, 16, &temp);
+ temp = totalNodes-1;
+ AmdPCIWriteBits(nodeIDReg, 6, 4, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * limitNodes(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Limit coherent config accesses to cpus as indicated by nodecnt.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void limitNodes(u8 node, cNorthBridge *nb)
+{
+ u32 temp = 1;
+ ASSERT((node < nb->maxNodes));
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_LINK_TRANS_CONTROL_0X68),
+ 15, 15, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * writeFullRoutingTable(u8 node, u8 target, u8 reqLink, u8 rspLink, u32 BClinks, cNorthBridge *nb)
+ *
+ * Description:
+ * Write the routing table entry for node to target, using the request link, response
+ * link, and broadcast links provided.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 target = the target node for these routes
+ * @param[in] u8 reqLink = the link for requests to target
+ * @param[in] u8 rspLink = the link for responses to target
+ * @param[in] u32 bClinks = the broadcast links
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void writeFullRoutingTable(u8 node, u8 target, u8 reqLink, u8 rspLink, u32 bClinks, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 value = 0;
+
+ ASSERT((node < nb->maxNodes) && (target < nb->maxNodes));
+ if (reqLink == ROUTETOSELF)
+ value |= nb->selfRouteRequestMask;
+ else
+ value |= nb->selfRouteRequestMask << (reqLink+1);
+
+ if (rspLink == ROUTETOSELF)
+ value |= nb->selfRouteResponseMask;
+ else
+ value |= nb->selfRouteResponseMask << (rspLink+1);
+
+ /* Allow us to accept a Broadcast ourselves, then set broadcasts for routes */
+ value |= (u32)1 << nb->broadcastSelfBit;
+ value |= (u32)bClinks << (nb->broadcastSelfBit + 1);
+
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE0_0X40 + target*4), &value);
+#else
+ STOP_HERE;
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * static u32
+ * makeKey(u8 currentNode)
+ *
+ * Description:
+ * Private routine to northbridge code.
+ * Construct the compatibility key for a node from its base family and extended
+ * family, as read from F3xFC. isCompatible() compares this key against the
+ * BSP's to decide whether a newly discovered node matches.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node
+ * @param[out] u32 result = the key value
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+static u32 makeKey(u8 node)
+{
+ u32 extFam, baseFam;
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 27, 20, &extFam);
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 11, 8, &baseFam);
+ return ((u32)(baseFam << 8) | extFam);
+}
+
+
+/**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * isCompatible(u8 currentNode, cNorthBridge *nb)
+ *
+ * Description:
+ * Determine whether a node is compatible with the discovered configuration so
+ * far. Currently, that means the family, extended family of the new node are the
+ * same as the BSP's.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] BOOL result = true: the new node is compatible, false: it is not
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL isCompatible(u8 node, cNorthBridge *nb)
+{
+ return (makeKey(node) == nb->compatibleKey);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * fam0fIsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Get node capability and update the minimum supported system capability.
+ * Return whether the current configuration exceeds the capability.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node
+ * @param[in,out] sMainData *pDat = sysMpCap (updated) and NodesDiscovered
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] BOOL result = true: system is capable of current config.
+ * false: system is not capable of current config.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL fam0fIsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 temp;
+ u8 maxNodes;
+
+ ASSERT(node < nb->maxNodes);
+
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CAPABILITY_3XE8),
+ 2, 1, &temp);
+ if (temp > 1)
+ {
+ maxNodes = 8;
+ } else {
+ if (temp == 1)
+ {
+ maxNodes = 2;
+ } else {
+ maxNodes = 1;
+ }
+ }
+ if (pDat->sysMpCap > maxNodes)
+ {
+ pDat->sysMpCap = maxNodes;
+ }
+ /* Note: sysMpCap is one-based and NodesDiscovered is zero-based, so when they are equal the comparison returns false */
+ return (pDat->sysMpCap > pDat->NodesDiscovered);
+#else
+ return 1;
+#endif
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * fam10IsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Get node capability and update the minimum supported system capability.
+ * Return whether the current configuration exceeds the capability.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node
+ * @param[in,out] sMainData *pDat = sysMpCap (updated) and NodesDiscovered
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] BOOL result = true: system is capable of current config.
+ * false: system is not capable of current config.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL fam10IsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 temp;
+ u8 maxNodes;
+
+ ASSERT(node < nb->maxNodes);
+
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CAPABILITY_3XE8),
+ 18, 16, &temp);
+
+ if (temp != 0)
+ {
+ maxNodes = (1 << (~temp & 0x3)); /* That is, 1, 2, 4, or 8 */
+ }
+ else
+ {
+ maxNodes = 8;
+ }
+
+ if (pDat->sysMpCap > maxNodes)
+ {
+ pDat->sysMpCap = maxNodes;
+ }
+ /* Note: sysMpCap is one-based and NodesDiscovered is zero-based, so when they are equal the comparison returns false */
+ return (pDat->sysMpCap > pDat->NodesDiscovered);
+#else
+ return 1;
+#endif
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * fam0fStopLink(u8 currentNode, u8 currentLink, cNorthBridge *nb)
+ *
+ * Description:
+ * Disable a cHT link on node by setting F0x[E4, C4, A4, 84][TransOff, EndOfChain]=1
+ *
+ * Parameters:
+ * @param[in] u8 node = the node this link is on
+ * @param[in] u8 link = the link to stop
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void fam0fStopLink(u8 node, u8 link, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 temp;
+ SBDFO linkBase;
+
+ ASSERT((node < nb->maxNodes) && (link < nb->maxLinks));
+
+ linkBase = makeLinkBase(node, link);
+
+ /* Set TransOff, EndOfChain */
+ temp = 3;
+ setHtControlRegisterBits(linkBase + HTHOST_LINK_CONTROL_REG, 7, 6, &temp);
+#endif
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * commonVoid()
+ *
+ * Description:
+ * Nothing.
+ *
+ * Parameters:
+ * None.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void commonVoid()
+{
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * commonReturnFalse()
+ *
+ * Description:
+ * Return False.
+ *
+ * Parameters:
+ * @param[out] BOOL result = false
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL commonReturnFalse()
+{
+ return 0;
+}
+
+/***************************************************************************
+ *** Non-coherent init code ***
+ *** Northbridge access routines ***
+ ***************************************************************************/
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * readSbLink(cNorthBridge *nb)
+ *
+ * Description:
+ * Return the link to the Southbridge
+ *
+ * Parameters:
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 results = the link to the southbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 readSbLink(cNorthBridge *nb)
+{
+ u32 temp;
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_UNIT_ID_0X64),
+ 10, 8, &temp);
+ return (u8)temp;
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * BOOL
+ * verifyLinkIsNonCoherent(u8 node, u8 link, cNorthBridge *nb)
+ *
+ * Description:
+ * Verify that the link is non-coherent, connected, and ready
+ *
+ * Parameters:
+ * @param[in] u8 node = the node that will be examined
+ * @param[in] u8 link = the Link on that node to examine
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 results = true - The link has the following status
+ * LinkCon=1, Link is connected
+ * InitComplete=1, Link initialization is complete
+ * NC=1, Link is non-coherent
+ * UniP-cLDT=0, Link is not Uniprocessor cLDT
+ * LinkConPend=0 Link connection is not pending
+ * false- The link has some other status
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+BOOL verifyLinkIsNonCoherent(u8 node, u8 link, cNorthBridge *nb)
+{
+ u32 linkType;
+ SBDFO linkBase;
+
+ ASSERT((node < nb->maxNodes) && (link < nb->maxLinks));
+
+ linkBase = makeLinkBase(node, link);
+
+ /* FN0_98/A4/C4 = LDT Type Register */
+ AmdPCIRead(linkBase + HTHOST_LINK_TYPE_REG, &linkType);
+
+ /* Verify linkCon=1, InitComplete=1, NC=1, UniP-cLDT=0, LinkConPend=0 */
+ return (linkType & HTHOST_TYPE_MASK) == HTHOST_TYPE_NONCOHERENT;
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * ht3SetCFGAddrMap(u8 cfgMapIndex, u8 secBus, u8 subBus, u8 targetNode, u8 targetLink, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Configure and enable config access to a non-coherent chain for the given bus range.
+ *
+ * Parameters:
+ * @param[in] u8 cfgMapIndex = the map entry to set
+ * @param[in] u8 secBus = The secondary bus number to use
+ * @param[in] u8 subBus = The subordinate bus number to use
+ * @param[in] u8 targetNode = The node that shall be the recipient of the traffic
+ * @param[in] u8 targetLink = The link that shall be the recipient of the traffic
+ * @param[in] sMainData* pDat = our global state
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void ht3SetCFGAddrMap(u8 cfgMapIndex, u8 secBus, u8 subBus, u8 targetNode, u8 targetLink, sMainData *pDat, cNorthBridge *nb)
+{
+ u8 curNode;
+ SBDFO linkBase;
+ u32 temp;
+
+ linkBase = makeLinkBase(targetNode, targetLink);
+
+ ASSERT(secBus <= subBus);
+ temp = secBus;
+ AmdPCIWriteBits(linkBase + HTHOST_ISOC_REG, 15, 8, &temp);
+
+ /* For target link, note that rev F uses bits 9:8 and only with GH is bit 10
+ * set to indicate a sublink. For node, we are currently not supporting Extended
+ * routing tables.
+ */
+ temp = ((u32)subBus << 24) + ((u32)secBus << 16) + ((u32)targetLink << 8)
+ + ((u32)targetNode << 4) + (u32)3;
+ for (curNode = 0; curNode < pDat->NodesDiscovered+1; curNode++)
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(curNode),
+ makePCIBusFromNode(curNode),
+ makePCIDeviceFromNode(curNode),
+ CPU_ADDR_FUNC_01,
+ REG_ADDR_CONFIG_MAP0_1XE0 + 4*cfgMapIndex),
+ &temp);
+}
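For concreteness (editorial sketch with hypothetical numbers): mapping buses 0x20-0x3F to node 0, link 2 makes the loop above write

	u32 temp = ((u32)0x3F << 24)	/* subordinate bus   */
		 + ((u32)0x20 << 16)	/* secondary bus     */
		 + ((u32)2    <<  8)	/* destination link  */
		 + ((u32)0    <<  4)	/* destination node  */
		 + (u32)3;		/* low enable bits   */
	/* temp == 0x3F200203, stored at F1xE0 + 4*cfgMapIndex on every node */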
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * ht1SetCFGAddrMap(u8 cfgMapIndex, u8 secBus, u8 subBus, u8 targetNode, u8 targetLink, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Configure and enable config access to a non-coherent chain for the given bus range.
+ *
+ * Parameters:
+ * @param[in] u8 cfgMapIndex = the map entry to set
+ * @param[in] u8 secBus = The secondary bus number to use
+ * @param[in] u8 subBus = The subordinate bus number to use
+ * @param[in] u8 targetNode = The node that shall be the recipient of the traffic
+ * @param[in] u8 targetLink = The link that shall be the recipient of the traffic
+ * @param[in] sMainData* pDat = our global state
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void ht1SetCFGAddrMap(u8 cfgMapIndex, u8 secBus, u8 subBus, u8 targetNode, u8 targetLink, sMainData *pDat, cNorthBridge *nb)
+{
+ u8 curNode;
+ SBDFO linkBase;
+ u32 temp;
+
+ linkBase = makeLinkBase(targetNode, targetLink);
+
+ ASSERT(secBus <= subBus);
+ temp = secBus;
+ AmdPCIWriteBits(linkBase + HTHOST_ISOC_REG, 15, 8, &temp);
+
+ temp = subBus;
+ AmdPCIWriteBits(linkBase + HTHOST_ISOC_REG, 23, 16, &temp);
+
+ /* For target link, note that rev F uses bits 9:8 and only with GH is bit 10
+ * set to indicate a sublink. For node, we are currently not supporting Extended
+ * routing tables.
+ */
+ temp = ((u32)subBus << 24) + ((u32)secBus << 16) + ((u32)targetLink << 8)
+ + ((u32)targetNode << 4) + (u32)3;
+ for (curNode = 0; curNode < pDat->NodesDiscovered+1; curNode++)
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(curNode),
+ makePCIBusFromNode(curNode),
+ makePCIDeviceFromNode(curNode),
+ CPU_ADDR_FUNC_01,
+ REG_ADDR_CONFIG_MAP0_1XE0 + 4*cfgMapIndex),
+ &temp);
+}
+
+/***************************************************************************
+ *** Link Optimization ***
+ ***************************************************************************/
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * convertBitsToWidth(u8 value, cNorthBridge *nb)
+ *
+ * Description:
+ * Given the bits set in the register field, return the width it represents
+ *
+ * Parameters:
+ * @param[in] u8 value = The bits for the register
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 results = The width
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 convertBitsToWidth(u8 value, cNorthBridge *nb)
+{
+ if (value == 1) {
+ return 16;
+ } else if (value == 0) {
+ return 8;
+ } else if (value == 5) {
+ return 4;
+ } else if (value == 4) {
+ return 2;
+ }
+	STOP_HERE; // This is an internal error condition
+
+ return 0xFF; // make the compiler happy.
+
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u8
+ * convertWidthToBits(u8 value, cNorthBridge *nb)
+ *
+ * Description:
+ * Translate a desired width setting to the bits to set in the register field
+ *
+ * Parameters:
+ * @param[in] u8 value = The width
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u8 results = The bits for the register
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u8 convertWidthToBits(u8 value, cNorthBridge *nb)
+{
+ if (value == 16) {
+ return 1;
+ } else if (value == 8) {
+ return 0;
+ } else if (value == 4) {
+ return 5;
+ } else if (value == 2) {
+ return 4;
+ }
+ STOP_HERE; // This is an internal error condition
+
+ return 0xFF; // make the compiler happy.
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u16
+ * ht1NorthBridgeFreqMask(u8 NodeID, cNorthBridge *nb)
+ *
+ * Description:
+ * Return a mask that eliminates HT frequencies that cannot be used due to a slow
+ * northbridge frequency.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node to check (result could later be made node specific)
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u16 results = Frequency mask
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u16 ht1NorthBridgeFreqMask(u8 node, cNorthBridge *nb)
+{
+ /* only up to HT1 speeds */
+ return (HT_FREQUENCY_LIMIT_HT1_ONLY);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * u16
+ * fam10NorthBridgeFreqMask(u8 NodeID, cNorthBridge *nb)
+ *
+ * Description:
+ * Return a mask that eliminates HT frequencies that cannot be used due to a slow
+ * northbridge frequency.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node to check (result could later be made node specific)
+ * @param[in] cNorthBridge *nb = this northbridge
+ * @param[out] u16 results = Frequency mask
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+u16 fam10NorthBridgeFreqMask(u8 node, cNorthBridge *nb)
+{
+ u8 nbCOF;
+ u16 supported;
+
+ nbCOF = getMinNbCOF();
+ /*
+ * nbCOF is minimum northbridge speed in hundreds of MHz.
+ * HT can not go faster than the minimum speed of the northbridge.
+ */
+ if ((nbCOF >= 6) && (nbCOF <= 26))
+ {
+ /* Convert frequency to bit and all less significant bits,
+ * by setting next power of 2 and subtracting 1.
+ */
+ supported = ((u16)1 << ((nbCOF >> 1) + 2)) - 1;
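+		/* Worked example (a sketch, assuming the usual HT frequency-capability bit
+		 * encoding, bit 4 = 600MHz ... bit 14 = 2600MHz): a 1.0 GHz northbridge has
+		 * nbCOF = 10, so ((10 >> 1) + 2) = 7 and supported = (1 << 7) - 1 = 0x007F,
+		 * i.e. HT frequencies up to 1000MHz remain allowed.
+		 */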
+ }
+ else if (nbCOF > 26)
+ {
+ supported = HT_FREQUENCY_LIMIT_2600M;
+ }
+	/* Unlikely cases, but handle them defensively; these values also avoid the shift trick above. */
+ else if (nbCOF == 4)
+ {
+ supported = HT_FREQUENCY_LIMIT_400M;
+ }
+ else if (nbCOF == 2)
+ {
+ supported = HT_FREQUENCY_LIMIT_200M;
+ }
+ else
+ {
+ STOP_HERE;
+ supported = HT_FREQUENCY_LIMIT_200M;
+ }
+
+ return (fixEarlySampleFreqCapability(supported));
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * gatherLinkData(sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * For all discovered links, populate the port list with the frequency and width
+ * capabilities.
+ *
+ * Parameters:
+ * @param[in,out] sMainData* pDat = our global state, port list
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void gatherLinkData(sMainData *pDat, cNorthBridge *nb)
+{
+ u8 i;
+ SBDFO linkBase;
+ u32 temp;
+
+ for (i = 0; i < pDat->TotalLinks*2; i++)
+ {
+ if (pDat->PortList[i].Type == PORTLIST_TYPE_CPU)
+ {
+ linkBase = makeLinkBase(pDat->PortList[i].NodeID, pDat->PortList[i].Link);
+
+ pDat->PortList[i].Pointer = linkBase;
+
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 22, 20, &temp);
+ pDat->PortList[i].PrvWidthOutCap = convertBitsToWidth((u8)temp, pDat->nb);
+
+ AmdPCIReadBits(linkBase + HTHOST_LINK_CONTROL_REG, 18, 16, &temp);
+ pDat->PortList[i].PrvWidthInCap = convertBitsToWidth((u8)temp, pDat->nb);
+
+ AmdPCIReadBits(linkBase + HTHOST_FREQ_REV_REG, 31, 16, &temp);
+ pDat->PortList[i].PrvFrequencyCap = (u16)temp & 0x7FFF
+ & nb->northBridgeFreqMask(pDat->PortList[i].NodeID, pDat->nb); // Mask off bit 15, reserved value
+ }
+ else
+ {
+ linkBase = pDat->PortList[i].Pointer;
+ if (pDat->PortList[i].Link == 1)
+ linkBase += HTSLAVE_LINK01_OFFSET;
+
+ AmdPCIReadBits(linkBase + HTSLAVE_LINK_CONTROL_0_REG, 22, 20, &temp);
+ pDat->PortList[i].PrvWidthOutCap = convertBitsToWidth((u8)temp, pDat->nb);
+
+ AmdPCIReadBits(linkBase + HTSLAVE_LINK_CONTROL_0_REG, 18, 16, &temp);
+ pDat->PortList[i].PrvWidthInCap = convertBitsToWidth((u8)temp, pDat->nb);
+
+ AmdPCIReadBits(linkBase + HTSLAVE_FREQ_REV_0_REG, 31, 16, &temp);
+ pDat->PortList[i].PrvFrequencyCap = (u16)temp;
+
+ if (pDat->HtBlock->AMD_CB_DeviceCapOverride)
+ {
+ linkBase &= 0xFFFFF000;
+ AmdPCIRead(linkBase, &temp);
+
+ pDat->HtBlock->AMD_CB_DeviceCapOverride(
+ pDat->PortList[i].NodeID,
+ pDat->PortList[i].HostLink,
+ pDat->PortList[i].HostDepth,
+ (u8)SBDFO_SEG(pDat->PortList[i].Pointer),
+ (u8)SBDFO_BUS(pDat->PortList[i].Pointer),
+ (u8)SBDFO_DEV(pDat->PortList[i].Pointer),
+ temp,
+ pDat->PortList[i].Link,
+ &(pDat->PortList[i].PrvWidthInCap),
+ &(pDat->PortList[i].PrvWidthOutCap),
+ &(pDat->PortList[i].PrvFrequencyCap));
+ }
+ }
+ }
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * setLinkData(sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Change the hardware state for all links according to the now optimized data in the
+ * port list data structure.
+ *
+ * Parameters:
+ * @param[in] sMainData* pDat = our global state, port list
+ * @param[in] cNorthBridge *nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void setLinkData(sMainData *pDat, cNorthBridge *nb)
+{
+ u8 i;
+ SBDFO linkBase;
+ u32 temp, widthin, widthout, bits;
+
+ for (i = 0; i < pDat->TotalLinks*2; i++)
+ {
+
+ ASSERT(pDat->PortList[i&0xFE].SelWidthOut == pDat->PortList[(i&0xFE)+1].SelWidthIn);
+ ASSERT(pDat->PortList[i&0xFE].SelWidthIn == pDat->PortList[(i&0xFE)+1].SelWidthOut);
+ ASSERT(pDat->PortList[i&0xFE].SelFrequency == pDat->PortList[(i&0xFE)+1].SelFrequency);
+
+ if (pDat->PortList[i].SelRegang)
+ {
+ ASSERT(pDat->PortList[i].Type == PORTLIST_TYPE_CPU);
+ ASSERT(pDat->PortList[i].Link < 4);
+ temp = 1;
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(pDat->PortList[i].NodeID),
+ makePCIBusFromNode(pDat->PortList[i].NodeID),
+ makePCIDeviceFromNode(pDat->PortList[i].NodeID),
+ CPU_HTNB_FUNC_00,
+ REG_HT_LINK_EXT_CONTROL0_0X170 + 4*pDat->PortList[i].Link),
+ 0, 0, &temp);
+ }
+
+ if (pDat->PortList[i].Type == PORTLIST_TYPE_CPU)
+ {
+ if (pDat->HtBlock->AMD_CB_OverrideCpuPort)
+ pDat->HtBlock->AMD_CB_OverrideCpuPort(pDat->PortList[i].NodeID,
+ pDat->PortList[i].Link,
+ &(pDat->PortList[i].SelWidthIn),
+ &(pDat->PortList[i].SelWidthOut),
+ &(pDat->PortList[i].SelFrequency));
+ }
+ else
+ {
+ if (pDat->HtBlock->AMD_CB_OverrideDevicePort)
+ pDat->HtBlock->AMD_CB_OverrideDevicePort(pDat->PortList[i].NodeID,
+ pDat->PortList[i].HostLink,
+ pDat->PortList[i].HostDepth,
+ pDat->PortList[i].Link,
+ &(pDat->PortList[i].SelWidthIn),
+ &(pDat->PortList[i].SelWidthOut),
+ &(pDat->PortList[i].SelFrequency));
+ }
+
+ linkBase = pDat->PortList[i].Pointer;
+ if ((pDat->PortList[i].Type == PORTLIST_TYPE_IO) && (pDat->PortList[i].Link == 1))
+ linkBase += HTSLAVE_LINK01_OFFSET;
+
+ /* Some IO devices don't work properly when setting widths, so write them in a single operation,
+ * rather than individually.
+ */
+ widthout = convertWidthToBits(pDat->PortList[i].SelWidthOut, pDat->nb);
+ ASSERT(widthout == 1 || widthout == 0 || widthout == 5 || widthout == 4);
+ widthin = convertWidthToBits(pDat->PortList[i].SelWidthIn, pDat->nb);
+ ASSERT(widthin == 1 || widthin == 0 || widthin == 5 || widthin == 4);
+
+ temp = (widthin & 7) | ((widthout & 7) << 4);
+ setHtControlRegisterBits(linkBase + HTHOST_LINK_CONTROL_REG, 31, 24, &temp);
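+		/* Note: in the write above, the selected link width in lands in register
+		 * bits [26:24] and the link width out in bits [30:28] of the HT link
+		 * control/configuration register (an informational reading of the layout).
+		 */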
+
+ temp = pDat->PortList[i].SelFrequency;
+ if (pDat->PortList[i].Type == PORTLIST_TYPE_CPU)
+ {
+ ASSERT((temp >= HT_FREQUENCY_600M && temp <= HT_FREQUENCY_2600M)
+ || (temp == HT_FREQUENCY_200M) || (temp == HT_FREQUENCY_400M));
+ AmdPCIWriteBits(linkBase + HTHOST_FREQ_REV_REG, 11, 8, &temp);
+ if (temp > HT_FREQUENCY_1000M) // Gen1 = 200Mhz -> 1000MHz, Gen3 = 1200MHz -> 2600MHz
+ {
+ /* Enable for Gen3 frequencies */
+ temp = 1;
+ }
+ else
+ {
+ /* Disable for Gen1 frequencies */
+ temp = 0;
+ }
+ /* HT3 retry mode enable / disable */
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(pDat->PortList[i].NodeID),
+ makePCIBusFromNode(pDat->PortList[i].NodeID),
+ makePCIDeviceFromNode(pDat->PortList[i].NodeID),
+ CPU_HTNB_FUNC_00,
+ REG_HT_LINK_RETRY0_0X130 + 4*pDat->PortList[i].Link),
+ 0, 0, &temp);
+ /* and Scrambling enable / disable */
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(pDat->PortList[i].NodeID),
+ makePCIBusFromNode(pDat->PortList[i].NodeID),
+ makePCIDeviceFromNode(pDat->PortList[i].NodeID),
+ CPU_HTNB_FUNC_00,
+ REG_HT_LINK_EXT_CONTROL0_0X170 + 4*pDat->PortList[i].Link),
+ 3, 3, &temp);
+ }
+ else
+ {
+ SBDFO currentPtr;
+ BOOL isFound;
+
+ ASSERT(temp <= HT_FREQUENCY_2600M);
+ /* Write the frequency setting */
+ AmdPCIWriteBits(linkBase + HTSLAVE_FREQ_REV_0_REG, 11, 8, &temp);
+
+ /* Handle additional HT3 frequency requirements, if needed,
+ * or clear them if switching down to ht1 on a warm reset.
+			 * Gen1 = 200MHz -> 1000MHz, Gen3 = 1200MHz -> 2600MHz
+			 *
+			 * Even though we assert when debugging, we must always check that the
+			 * capability was actually found, since this is an unknown hardware
+			 * device and the callbacks may have supplied an unqualified frequency
+			 * (for example, trying to run HT3 on an HT1-only IO device).
+ */
+
+ if (temp > HT_FREQUENCY_1000M)
+ {
+ /* Enabling features if gen 3 */
+ bits = 1;
+ }
+ else
+ {
+ /* Disabling features if gen 1 */
+ bits = 0;
+ }
+
+ /* Retry Enable */
+ isFound = FALSE;
+ currentPtr = linkBase & (u32)0xFFFFF000; /* Set PCI Offset to 0 */
+ do
+ {
+ AmdPCIFindNextCap(&currentPtr);
+ if (currentPtr != ILLEGAL_SBDFO)
+ {
+ AmdPCIRead(currentPtr, &temp);
+ /* HyperTransport Retry Capability? */
+ if (IS_HT_RETRY_CAPABILITY(temp))
+ {
+ ASSERT(pDat->PortList[i].Link < 2);
+ AmdPCIWriteBits(currentPtr + HTRETRY_CONTROL_REG,
+ pDat->PortList[i].Link*16,
+ pDat->PortList[i].Link*16,
+ &bits);
+ isFound = TRUE;
+ }
+ /* Some other capability, keep looking */
+ }
+ else
+ {
+ /* If we are turning it off, that may mean the device was only ht1 capable,
+ * so don't complain that we can't do it.
+ */
+ if (bits != 0)
+ {
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventOptRequiredCap evt ={sizeof(sHtEventOptRequiredCap),
+ pDat->PortList[i].NodeID,
+ pDat->PortList[i].HostLink,
+ pDat->PortList[i].HostDepth};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_WARNING,
+ HT_EVENT_OPT_REQUIRED_CAP_RETRY,
+ (u8 *)&evt);
+ }
+ STOP_HERE;
+ }
+ isFound = TRUE;
+ }
+ } while (!isFound);
+
+ /* Scrambling enable */
+ isFound = FALSE;
+ currentPtr = linkBase & (u32)0xFFFFF000; /* Set PCI Offset to 0 */
+ do
+ {
+ AmdPCIFindNextCap(&currentPtr);
+ if (currentPtr != ILLEGAL_SBDFO)
+ {
+ AmdPCIRead(currentPtr, &temp);
+ /* HyperTransport Gen3 Capability? */
+ if (IS_HT_GEN3_CAPABILITY(temp))
+ {
+ ASSERT(pDat->PortList[i].Link < 2);
+ AmdPCIWriteBits((currentPtr +
+ HTGEN3_LINK_TRAINING_0_REG +
+ pDat->PortList[i].Link*HTGEN3_LINK01_OFFSET),
+ 3, 3, &bits);
+ isFound = TRUE;
+ }
+ /* Some other capability, keep looking */
+ }
+ else
+ {
+ /* If we are turning it off, that may mean the device was only ht1 capable,
+ * so don't complain that we can't do it.
+ */
+ if (bits != 0)
+ {
+ if (pDat->HtBlock->AMD_CB_EventNotify)
+ {
+ sHtEventOptRequiredCap evt ={sizeof(sHtEventOptRequiredCap),
+ pDat->PortList[i].NodeID,
+ pDat->PortList[i].HostLink,
+ pDat->PortList[i].HostDepth};
+
+ pDat->HtBlock->AMD_CB_EventNotify(HT_EVENT_CLASS_WARNING,
+ HT_EVENT_OPT_REQUIRED_CAP_GEN3,
+ (u8 *)&evt);
+ }
+ STOP_HERE;
+ }
+ isFound = TRUE;
+ }
+ } while (!isFound);
+ }
+ }
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * fam0fWriteHTLinkCmdBufferAlloc(u8 node, u8 link, u8 req, u8 preq, u8 rsp, u8 prb)
+ *
+ * Description:
+ * Set the command buffer allocations in the buffer count register for the node and link.
+ * The command buffer settings in the low 16 bits are the same on both
+ * family 10h and family 0fh northbridges.
+ *
+ * Parameters:
+ * @param[in] u8 node = The node to set allocations on
+ * @param[in] u8 link = the link to set allocations on
+ * @param[in] u8 req = non-posted Request Command Buffers
+ * @param[in] u8 preq = Posted Request Command Buffers
+ * @param[in] u8 rsp = Response Command Buffers
+ * @param[in] u8 prb = Probe Command Buffers
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+static void fam0fWriteHTLinkCmdBufferAlloc(u8 node, u8 link, u8 req, u8 preq, u8 rsp, u8 prb)
+{
+ u32 temp;
+ SBDFO currentPtr;
+
+ currentPtr = makeLinkBase(node, link);
+ currentPtr += HTHOST_BUFFER_COUNT_REG;
+
+ /* non-posted Request Command Buffers */
+ temp = req;
+ AmdPCIWriteBits(currentPtr, 3, 0, &temp);
+ /* Posted Request Command Buffers */
+ temp = preq;
+ AmdPCIWriteBits(currentPtr, 7, 4, &temp);
+ /* Response Command Buffers */
+ temp = rsp;
+ AmdPCIWriteBits(currentPtr, 11, 8, &temp);
+ /* Probe Command Buffers */
+ temp = prb;
+ AmdPCIWriteBits(currentPtr, 15, 12, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * fam0fWriteHTLinkDatBufferAlloc(u8 node, u8 link, u8 reqD, u8 preqD, u8 rspD)
+ *
+ * Description:
+ * Set the data buffer allocations in the buffer count register for the node and link.
+ * The data buffer settings in the high 16 bits are not the same on
+ * family 10h and family 0fh northbridges.
+ *
+ * Parameters:
+ * @param[in] u8 node = The node to set allocations on
+ * @param[in] u8 link = the link to set allocations on
+ * @param[in] u8 reqD = non-posted Request Data Buffers
+ * @param[in] u8 preqD = Posted Request Data Buffers
+ * @param[in] u8 rspD = Response Data Buffers
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+static void fam0fWriteHTLinkDatBufferAlloc(u8 node, u8 link, u8 reqD, u8 preqD, u8 rspD)
+{
+ u32 temp;
+ SBDFO currentPtr;
+
+ currentPtr = makeLinkBase(node, link);
+ currentPtr += HTHOST_BUFFER_COUNT_REG;
+
+ /* Request Data Buffers */
+ temp = reqD;
+ AmdPCIWriteBits(currentPtr, 18, 16, &temp);
+ /* Posted Request Data Buffers */
+ temp = preqD;
+ AmdPCIWriteBits(currentPtr, 22, 20, &temp);
+ /* Response Data Buffers */
+ temp = rspD;
+ AmdPCIWriteBits(currentPtr, 26, 24, &temp);
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * ht3WriteTrafficDistribution(u32 links01, u32 links10, cNorthBridge *nb)
+ *
+ * Description:
+ * Set the traffic distribution register for the links provided.
+ *
+ * Parameters:
+ * @param[in] u32 links01 = coherent links from node 0 to 1
+ * @param[in] u32 links10 = coherent links from node 1 to 0
+ * @param[in] cNorthBridge* nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void ht3WriteTrafficDistribution(u32 links01, u32 links10, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 temp;
+
+ /* Node 0 */
+ /* DstLnk */
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_HT_TRAFFIC_DIST_0X164),
+ 23, 16, &links01);
+ /* DstNode = 1, cHTPrbDistEn=1, cHTRspDistEn=1, cHTReqDistEn=1 */
+ temp = 0x0107;
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_HT_TRAFFIC_DIST_0X164),
+ 15, 0, &temp);
+
+ /* Node 1 */
+ /* DstLnk */
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(1),
+ makePCIBusFromNode(1),
+ makePCIDeviceFromNode(1),
+ CPU_HTNB_FUNC_00,
+ REG_HT_TRAFFIC_DIST_0X164),
+ 23, 16, &links10);
+ /* DstNode = 0, cHTPrbDistEn=1, cHTRspDistEn=1, cHTReqDistEn=1 */
+ temp = 0x0007;
+ AmdPCIWriteBits(MAKE_SBDFO(makePCISegmentFromNode(1),
+ makePCIBusFromNode(1),
+ makePCIDeviceFromNode(1),
+ CPU_HTNB_FUNC_00,
+ REG_HT_TRAFFIC_DIST_0X164),
+ 15, 0, &temp);
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * ht1WriteTrafficDistribution(u32 links01, u32 links10, cNorthBridge *nb)
+ *
+ * Description:
+ * Traffic distribution is more complex in this case as the routing table must be
+ * adjusted to use one link for requests and the other for responses. Also,
+ * perform the buffer tunings on the links required for this config.
+ *
+ * Parameters:
+ * @param[in] u32 links01 = coherent links from node 0 to 1
+ * @param[in] u32 links10 = coherent links from node 1 to 0
+ * @param[in] cNorthBridge* nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void ht1WriteTrafficDistribution(u32 links01, u32 links10, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u32 route01, route10;
+ u8 req0, req1, rsp0, rsp1, nclink;
+
+ /*
+	 * Get the current request route for 0->1 and 1->0. This indicates which of the links
+	 * in links01 are connected to which links in links10; we need to know that because we
+	 * distribute traffic by rerouting. The link chosen by htinit becomes the request/probe
+	 * link; the other link will be used for responses.
+ */
+
+ /* Get the routes, and hang on to them, we will write them back updated. */
+ AmdPCIRead(MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE1_0X44),
+ &route01);
+ AmdPCIRead(MAKE_SBDFO(makePCISegmentFromNode(1),
+ makePCIBusFromNode(1),
+ makePCIDeviceFromNode(1),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE0_0X40),
+ &route10);
+
+ /* Convert the request routes to a link number. Note "0xE" is ht1 nb specific.
+ * Find the response link numbers.
+ */
+ ASSERT((route01 & 0xE) && (route10 & 0xE)); /* no route! error! */
+ req0 = (u8)AmdBitScanReverse((route01 & 0xE)) - 1;
+ req1 = (u8)AmdBitScanReverse((route10 & 0xE)) - 1;
+ /* Now, find the other link for the responses */
+ rsp0 = (u8)AmdBitScanReverse((links01 & ~((u32)1 << req0)));
+ rsp1 = (u8)AmdBitScanReverse((links10 & ~((u32)1 << req1)));
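+	/* Worked example (hypothetical values for illustration): if links 0 and 1 connect
+	 * the two nodes (links01 = links10 = 0x3) and the 0->1 request route uses link 0
+	 * (route01 & 0xE == 0x2), then req0 = 0 and rsp0 = 1, so the response-route update
+	 * below sets bit 10 (0x0100 << 2 = 0x0400), i.e. responses travel over link 1.
+	 */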
+
+ /* ht1 nb restriction, must have exactly two links */
+ ASSERT(((((links01 & ~((u32)1 << req0)) & ~((u32)1 << rsp0))) == 0)
+ && ((((links10 & ~((u32)1 << req1)) & ~((u32)1 << rsp1))) == 0));
+
+ route01 = (route01 & ~0x0E00) | ((u32)0x0100<<(rsp0 + 1));
+ route10 = (route10 & ~0x0E00) | ((u32)0x0100<<(rsp1 + 1));
+
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE1_0X44),
+ &route01);
+
+ AmdPCIWrite(MAKE_SBDFO(makePCISegmentFromNode(1),
+ makePCIBusFromNode(1),
+ makePCIDeviceFromNode(1),
+ CPU_HTNB_FUNC_00,
+ REG_ROUTE0_0X40),
+ &route10);
+
+ /* While we otherwise do buffer tunings elsewhere, for the dual cHT DP case with
+ * ht1 northbridges like family 0Fh, do the tunings here where we have all the
+ * link and route info at hand and don't need to recalculate it.
+ */
+
+ /* Node 0, Request / Probe Link (note family F only has links < 4) */
+ fam0fWriteHTLinkCmdBufferAlloc(0, req0, 6, 3, 1, 6);
+ fam0fWriteHTLinkDatBufferAlloc(0, req0, 4, 3, 1);
+ /* Node 0, Response Link (note family F only has links < 4) */
+ fam0fWriteHTLinkCmdBufferAlloc(0, rsp0, 1, 0, 15, 0);
+ fam0fWriteHTLinkDatBufferAlloc(0, rsp0, 1, 1, 6);
+ /* Node 1, Request / Probe Link (note family F only has links < 4) */
+ fam0fWriteHTLinkCmdBufferAlloc(1, req1, 6, 3, 1, 6);
+ fam0fWriteHTLinkDatBufferAlloc(1, req1, 4, 3, 1);
+ /* Node 1, Response Link (note family F only has links < 4) */
+ fam0fWriteHTLinkCmdBufferAlloc(1, rsp1, 1, 0, 15, 0);
+ fam0fWriteHTLinkDatBufferAlloc(1, rsp1, 1, 1, 6);
+
+ /* Node 0, is the third link non-coherent? */
+ nclink = (u8)AmdBitScanReverse(((u8)0x07 & ~((u32)1 << req0) & ~((u32)1 << rsp0)));
+ if (nb->verifyLinkIsNonCoherent(0, nclink, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(0, nclink, 6, 5, 2, 0);
+ }
+
+ /* Node 1, is the third link non-coherent? */
+ nclink = (u8)AmdBitScanReverse(((u8)0x07 & ~((u32)1 << req1) & ~((u32)1 << rsp1)));
+ if (nb->verifyLinkIsNonCoherent(1, nclink, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(1, nclink, 6, 5, 2, 0);
+ }
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * fam0fBufferOptimizations(u8 node, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Buffer tunings are inherently northbridge specific. Check for specific configs
+ * which require adjustments and apply any standard workarounds to this node.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node to tune
+ * @param[in] sMainData *pDat = our global state
+ * @param[in] cNorthBridge* nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void fam0fBufferOptimizations(u8 node, sMainData *pDat, cNorthBridge *nb)
+{
+#ifndef HT_BUILD_NC_ONLY
+ u8 i;
+ u32 temp;
+ SBDFO currentPtr;
+
+ ASSERT(node < nb->maxNodes);
+
+ /* Fix the FIFO pointer register before changing speeds */
+ currentPtr = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_FIFOPTR_3XDC);
+ for (i=0; i < nb->maxLinks; i++)
+ {
+ temp = 0;
+ if (nb->verifyLinkIsCoherent(node, i, nb))
+ {
+ temp = 0x26;
+ ASSERT(i<3);
+ AmdPCIWriteBits(currentPtr, 8*i + 5, 8*i, &temp);
+ }
+ else
+ {
+ if (nb->verifyLinkIsNonCoherent(node, i, nb))
+ {
+ temp = 0x25;
+ ASSERT(i<3);
+ AmdPCIWriteBits(currentPtr, 8*i + 5, 8*i, &temp);
+ }
+ }
+ }
+ /*
+ * 8P Buffer tuning.
+ * Either apply the BKDG tunings or, if applicable, apply the more restrictive errata 153
+ * workaround.
+ * If 8 nodes, Check this node for 'inner' or 'outer'.
+ * Tune each link based on coherent or non-coherent
+ */
+ if (pDat->NodesDiscovered >= 6)
+ {
+ u8 j;
+ BOOL isOuter;
+ BOOL isErrata153;
+
+		/* This is for family 0Fh, which is at most dual core, so 7 or 8 nodes are
+		 * required to reach 14 or more cores. We checked the node count above; now
+		 * cross-check that the core count is 14 or more. Both conditions (14+ cores
+		 * and 7 or 8 nodes), not either one alone, must hold to apply the errata 153
+		 * workaround. Otherwise, 7 or 8 rev F nodes use the BKDG tuning.
+ */
+
+ isErrata153 = 0;
+
+ AmdPCIReadBits (MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_HTNB_FUNC_00,
+ REG_NODE_ID_0X60),
+ 19, 16, &temp);
+
+ if (temp >= 14)
+ {
+ /* Check whether we need to do errata 153 tuning or BKDG tuning.
+ * Errata 153 applies to JH-1, JH-2 and older. It is fixed in JH-3
+ * (and, one assumes, from there on).
+ */
+ for (i=0; i < (pDat->NodesDiscovered +1); i++)
+ {
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(i),
+ makePCIBusFromNode(i),
+ makePCIDeviceFromNode(i),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 7, 0, &temp);
+ if (((u8)temp & ~0x40) < 0x13)
+ {
+ isErrata153 = 1;
+ break;
+ }
+ }
+ }
+
+ for (i=0; i < CPU_ADDR_NUM_CONFIG_MAPS; i++)
+ {
+ isOuter = FALSE;
+ /* Check for outer node by scanning the config maps on node 0 for one
+ * which is assigned to this node.
+ */
+ currentPtr = MAKE_SBDFO(makePCISegmentFromNode(0),
+ makePCIBusFromNode(0),
+ makePCIDeviceFromNode(0),
+ CPU_ADDR_FUNC_01,
+ REG_ADDR_CONFIG_MAP0_1XE0 + (4 * i));
+ AmdPCIReadBits (currentPtr, 1, 0, &temp);
+ /* Make sure this config map is valid, if it is it will be enabled for read/write */
+ if (temp == 3)
+ {
+ /* It's valid, get the node (that node is an outer node) */
+ AmdPCIReadBits (currentPtr, 6, 4, &temp);
+ /* Is the node we're working on now? */
+ if (node == (u8)temp)
+ {
+ /* This is an outer node. Tune it appropriately. */
+ for (j=0; j < nb->maxLinks; j++)
+ {
+ if (isErrata153)
+ {
+ if (nb->verifyLinkIsCoherent(node, j, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(node, j, 1, 1, 6, 4);
+ }
+ else
+ {
+ if (nb->verifyLinkIsNonCoherent(node, j, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(node, j, 5, 4, 1, 0);
+ }
+ }
+ }
+ else
+ {
+ if (nb->verifyLinkIsCoherent(node, j, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(node, j, 1, 1, 8, 5);
+ }
+ }
+ }
+ /*
+ * SRI to XBAR Buffer Counts are correct for outer node at power on defaults.
+ */
+ isOuter = TRUE;
+ break;
+ }
+ }
+ /* We fill config maps in ascending order, so if we didn't use this one, we're done. */
+ else break;
+ }
+ if (!isOuter)
+ {
+ if (isErrata153)
+ {
+ /* Tuning for inner node coherent links */
+ for (j=0; j < nb->maxLinks; j++)
+ {
+ if (nb->verifyLinkIsCoherent(node, j, nb))
+ {
+ fam0fWriteHTLinkCmdBufferAlloc(node, j, 2, 1, 5, 4);
+ }
+
+ }
+ /* SRI to XBAR Buffer Count for inner nodes, zero DReq and DPReq */
+ temp = 0;
+ AmdPCIWriteBits (MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_SRI_XBAR_BUF_3X70),
+ 31, 28, &temp);
+ }
+ }
+
+ /*
+		 * Tune the MCT to XBAR Buffer Count the same on all nodes: 2 Probes, 5 Responses.
+ */
+ if (isErrata153)
+ {
+ temp = 0x25;
+ AmdPCIWriteBits (MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_MCT_XBAR_BUF_3X78),
+ 14, 8, &temp);
+ }
+ }
+#endif /* HT_BUILD_NC_ONLY */
+}
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * fam10BufferOptimizations(u8 node, sMainData *pDat, cNorthBridge *nb)
+ *
+ * Description:
+ * Buffer tunings are inherently northbridge specific. Check for specific configs
+ * which require adjustments and apply any standard workarounds to this node.
+ *
+ * Parameters:
+ * @param[in] u8 node = the node to tune
+ * @param[in] sMainData *pDat = global state
+ * @param[in] cNorthBridge* nb = this northbridge
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void fam10BufferOptimizations(u8 node, sMainData *pDat, cNorthBridge *nb)
+{
+ u32 temp;
+ SBDFO currentPtr;
+ u8 i;
+
+ ASSERT(node < nb->maxNodes);
+
+ /*
+ * Link to XCS Token Count Tuning
+ *
+ * For each active link that we reganged (so this unfortunately can't go into the PCI reg
+ * table), we have to switch the Link to XCS Token Counts to the ganged state.
+	 * We do this here for the non-UMA case by writing the values that would have been
+	 * the power-on defaults if the link had been ganged at cold reset.
+ */
+ for (i = 0; i < pDat->TotalLinks*2; i++)
+ {
+ if ((pDat->PortList[i].NodeID == node) && (pDat->PortList[i].Type == PORTLIST_TYPE_CPU))
+ {
+			/* If the link is 4 or greater, this is sublink 1, so it is not reganged. */
+ if (pDat->PortList[i].Link < 4)
+ {
+ currentPtr = MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_LINK_XCS_TOKEN0_3X148 + 4*pDat->PortList[i].Link);
+ if (pDat->PortList[i].SelRegang)
+ {
+ /* Handle all the regang Token count adjustments */
+
+ /* Sublink 0: [Probe0tok] = 2 [Rsp0tok] = 2 [PReq0tok] = 2 [Req0tok] = 2 */
+ temp = 0xAA;
+ AmdPCIWriteBits(currentPtr, 7, 0, &temp);
+ /* Sublink 1: [Probe1tok] = 0 [Rsp1tok] = 0 [PReq1tok] = 0 [Req1tok] = 0 */
+ temp = 0;
+ AmdPCIWriteBits(currentPtr, 23, 16, &temp);
+ /* [FreeTok] = 3 */
+ temp = 3;
+ AmdPCIWriteBits(currentPtr, 15, 14, &temp);
+ }
+ else
+ {
+ /* Read the regang bit in hardware */
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(pDat->PortList[i].NodeID),
+ makePCIBusFromNode(pDat->PortList[i].NodeID),
+ makePCIDeviceFromNode(pDat->PortList[i].NodeID),
+ CPU_HTNB_FUNC_00,
+ REG_HT_LINK_EXT_CONTROL0_0X170 + 4*pDat->PortList[i].Link),
+ 0, 0, &temp);
+ if (temp == 1)
+ {
+						/* handle a minor adjustment for strapped ganged links. If SelRegang is false we
+ * didn't do the regang, so if the bit is on then it's hardware strapped.
+ */
+
+ /* [FreeTok] = 3 */
+ temp = 3;
+ AmdPCIWriteBits(currentPtr, 15, 14, &temp);
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * North Bridge 'constructor'.
+ *
+ */
+
+/**----------------------------------------------------------------------------------------
+ *
+ * void
+ * newNorthBridge(u8 node, cNorthBridge *nb)
+ *
+ * Description:
+ * Construct a new northbridge. This routine encapsulates the knowledge of how to
+ * distinguish between the supported northbridge families, which routines they can
+ * share, and which are family specific. A fully populated northbridge interface
+ * is returned in nb.
+ *
+ * Parameters:
+ * @param[in] node u8 = create a northbridge interface for this node.
+ * @param[out] cNorthBridge* nb = the caller's northbridge structure to initialize.
+ *
+ * ---------------------------------------------------------------------------------------
+ */
+void newNorthBridge(u8 node, cNorthBridge *nb)
+{
+ u32 match;
+ u32 extFam, baseFam, model;
+
+ cNorthBridge fam10 =
+ {
+#ifdef HT_BUILD_NC_ONLY
+ 8,
+ 1,
+ 12,
+#else
+ 8,
+ 8,
+ 64,
+#endif /* HT_BUILD_NC_ONLY*/
+ writeRoutingTable,
+ writeNodeID,
+ readDefLnk,
+ enableRoutingTables,
+ verifyLinkIsCoherent,
+ readTrueLinkFailStatus,
+ readToken,
+ writeToken,
+ fam10GetNumCoresOnNode,
+ setTotalNodesAndCores,
+ limitNodes,
+ writeFullRoutingTable,
+ isCompatible,
+ fam10IsCapable,
+ (void (*)(u8, u8, cNorthBridge*))commonVoid,
+ (BOOL (*)(u8, u8, sMainData*, cNorthBridge*))commonReturnFalse,
+ readSbLink,
+ verifyLinkIsNonCoherent,
+ ht3SetCFGAddrMap,
+ convertBitsToWidth,
+ convertWidthToBits,
+ fam10NorthBridgeFreqMask,
+ gatherLinkData,
+ setLinkData,
+ ht3WriteTrafficDistribution,
+ fam10BufferOptimizations,
+ 0x00000001,
+ 0x00000200,
+ 18,
+ 0x00000f01
+ };
+
+ cNorthBridge fam0f =
+ {
+#ifdef HT_BUILD_NC_ONLY
+ 3,
+ 1,
+ 12,
+#else
+ 3,
+ 8,
+ 32,
+#endif /* HT_BUILD_NC_ONLY*/
+ writeRoutingTable,
+ writeNodeID,
+ readDefLnk,
+ enableRoutingTables,
+ verifyLinkIsCoherent,
+ readTrueLinkFailStatus,
+ readToken,
+ writeToken,
+ fam0FGetNumCoresOnNode,
+ setTotalNodesAndCores,
+ limitNodes,
+ writeFullRoutingTable,
+ isCompatible,
+ fam0fIsCapable,
+ fam0fStopLink,
+ (BOOL (*)(u8, u8, sMainData*, cNorthBridge*))commonReturnFalse,
+ readSbLink,
+ verifyLinkIsNonCoherent,
+ ht1SetCFGAddrMap,
+ convertBitsToWidth,
+ convertWidthToBits,
+ ht1NorthBridgeFreqMask,
+ gatherLinkData,
+ setLinkData,
+ ht1WriteTrafficDistribution,
+ fam0fBufferOptimizations,
+ 0x00000001,
+ 0x00000100,
+ 16,
+ 0x00000f00
+ };
+
+ /* Start with enough of the key to identify the northbridge interface */
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 27, 20, &extFam);
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 11, 8, &baseFam);
+ AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
+ makePCIBusFromNode(node),
+ makePCIDeviceFromNode(node),
+ CPU_NB_FUNC_03,
+ REG_NB_CPUID_3XFC),
+ 7, 4, &model);
+ match = (u32)((baseFam << 8) | extFam);
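+	/* For example, a family 10h part reports baseFam = 0xF and extFam = 0x01, so
+	 * match = 0x0F01, matching fam10.compatibleKey, and the fam10 interface is
+	 * selected below; a family 0Fh part reports extFam = 0x00, giving match = 0x0F00
+	 * and selecting fam0f.
+	 */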
+
+ /* Test each in turn looking for a match. Init the struct if found */
+ if (match == fam10.compatibleKey)
+ {
+ Amdmemcpy((void *)nb, (const void *)&fam10, (u32) sizeof(cNorthBridge));
+ }
+ else
+ {
+ if (match == fam0f.compatibleKey)
+ {
+ Amdmemcpy((void *)nb, (const void *)&fam0f, (u32) sizeof(cNorthBridge));
+ }
+ else
+ {
+ STOP_HERE;
+ }
+ }
+
+ /* Update the initial limited key to the real one, which may include other matching info */
+ nb->compatibleKey = makeKey(node);
+}
+
diff --git a/src/northbridge/amd/amdht/h3ncmn.h b/src/northbridge/amd/amdht/h3ncmn.h
new file mode 100644
index 0000000000..51f82bff6f
--- /dev/null
+++ b/src/northbridge/amd/amdht/h3ncmn.h
@@ -0,0 +1,132 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef H3NCMN_H
+#define H3NCMN_H
+
+/*----------------------------------------------------------------------------
+ * Mixed (DEFINITIONS AND MACROS / TYPEDEFS, STRUCTURES, ENUMS)
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/*-----------------------------------------------------------------------------
+ * DEFINITIONS AND MACROS
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+/* Use a macro to convert a node number to a PCI device. If some future port of
+ * this code needs to, this can easily be replaced by a function call:
+ * u8 makePCIDeviceFromNode(u8 node);
+ */
+#define makePCIDeviceFromNode(node) \
+ ((u8)(24 + node))
+
+/* Use a macro to convert a node number to a PCI bus. If some future port of
+ * this code needs to, this can easily be replaced by a function call:
+ * u8 makePCIBusFromNode(u8 node);
+ */
+#define makePCIBusFromNode(node) \
+ ((u8)(0))
+
+/* Use a macro to convert a node number to a PCI Segment. If some future port of
+ * this code needs to, this can easily be replaced by a function call:
+ * u8 makePCISegmentFromNode(u8 node);
+ */
+#define makePCISegmentFromNode(node) \
+ ((u8)(0))
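+
+/* For example, node 2 maps to segment 0, bus 0, PCI device 26 (24 + 2); combined
+ * with MAKE_SBDFO() from porting.h this addresses that node's northbridge
+ * configuration space, e.g. MAKE_SBDFO(makePCISegmentFromNode(2),
+ * makePCIBusFromNode(2), makePCIDeviceFromNode(2), 0, 0x68) for function 0,
+ * register 0x68.
+ */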
+
+/* Macros to fix support issues that come up with early sample processors, which
+ * sometimes do things like report capabilities that are actually unsupported.
+ * Use the build flag, HT_BUILD_EARLY_SAMPLE_CPU_SUPPORT, to enable this support.
+ *
+ * It's not envisioned this would be replaced by an external function, but the prototype is
+ * u16 fixEarlySampleFreqCapability(u16 fc);
+ */
+#ifndef HT_BUILD_EARLY_SAMPLE_CPU_SUPPORT
+#define fixEarlySampleFreqCapability(fc) \
+ ((u16)fc)
+#else
+#define fixEarlySampleFreqCapability(fc) \
+ ((u16)fc & HT_FREQUENCY_LIMIT_HT1_ONLY)
+#endif
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS, STRUCTURES, ENUMS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+struct cNorthBridge
+{
+ /* Public data, clients of northbridge can access */
+ u8 maxLinks;
+ u8 maxNodes;
+ u8 maxPlatformLinks;
+
+ /* Public Interfaces for northbridge clients, coherent init*/
+ void (*writeRoutingTable)(u8 node, u8 target, u8 link, cNorthBridge *nb);
+ void (*writeNodeID)(u8 node, u8 nodeID, cNorthBridge *nb);
+ u8 (*readDefLnk)(u8 node, cNorthBridge *nb);
+ void (*enableRoutingTables)(u8 node, cNorthBridge *nb);
+ BOOL (*verifyLinkIsCoherent)(u8 node, u8 link, cNorthBridge *nb);
+ BOOL (*readTrueLinkFailStatus)(u8 node, u8 link, sMainData *pDat, cNorthBridge *nb);
+ u8 (*readToken)(u8 node, cNorthBridge *nb);
+ void (*writeToken)(u8 node, u8 value, cNorthBridge *nb);
+ u8 (*getNumCoresOnNode)(u8 node, cNorthBridge *nb);
+ void (*setTotalNodesAndCores)(u8 node, u8 totalNodes, u8 totalCores, cNorthBridge *nb);
+ void (*limitNodes)(u8 node, cNorthBridge *nb);
+ void (*writeFullRoutingTable)(u8 node, u8 target, u8 reqLink, u8 rspLink, u32 bClinks, cNorthBridge *nb);
+ BOOL (*isCompatible)(u8 node, cNorthBridge *nb);
+ BOOL (*isCapable)(u8 node, sMainData *pDat, cNorthBridge *nb);
+ void (*stopLink)(u8 node, u8 link, cNorthBridge *nb);
+ BOOL (*handleSpecialLinkCase)(u8 node, u8 link, sMainData *pDat, cNorthBridge *nb);
+
+ /* Public Interfaces for northbridge clients, noncoherent init */
+ u8 (*readSbLink)(cNorthBridge *nb);
+ BOOL (*verifyLinkIsNonCoherent)(u8 node, u8 link, cNorthBridge *nb);
+ void (*setCFGAddrMap)(u8 cfgMapIndex, u8 secBus, u8 subBus, u8 targetNode, u8 targetLink, sMainData *pDat, cNorthBridge *nb);
+
+ /* Public Interfaces for northbridge clients, Optimization */
+ u8 (*convertBitsToWidth)(u8 value, cNorthBridge *nb);
+ u8 (*convertWidthToBits)(u8 value, cNorthBridge *nb);
+ u16 (*northBridgeFreqMask)(u8 node, cNorthBridge *nb);
+ void (*gatherLinkData)(sMainData *pDat, cNorthBridge *nb);
+ void (*setLinkData)(sMainData *pDat, cNorthBridge *nb);
+
+ /* Public Interfaces for northbridge clients, System and performance Tuning. */
+ void (*writeTrafficDistribution)(u32 links01, u32 links10, cNorthBridge *nb);
+ void (*bufferOptimizations)(u8 node, sMainData *pDat, cNorthBridge *nb);
+
+ /* Private Data for northbridge implementation use only */
+ u32 selfRouteRequestMask;
+ u32 selfRouteResponseMask;
+ u8 broadcastSelfBit;
+ u32 compatibleKey;
+} ;
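+
+/* Typical client usage (a sketch): construct the interface once and then call
+ * through the function pointers, e.g.
+ *
+ *	cNorthBridge nb;
+ *	newNorthBridge(0, &nb);
+ *	if (nb.verifyLinkIsCoherent(node, link, &nb))
+ *		... tune the coherent link ...
+ */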
+
+/*----------------------------------------------------------------------------
+ * FUNCTIONS PROTOTYPE
+ *
+ *----------------------------------------------------------------------------
+ */
+void newNorthBridge(u8 node, cNorthBridge *nb);
+
+#endif /* H3NCMN_H */
diff --git a/src/northbridge/amd/amdht/ht_wrapper.c b/src/northbridge/amd/amdht/ht_wrapper.c
new file mode 100644
index 0000000000..6892c63e45
--- /dev/null
+++ b/src/northbridge/amd/amdht/ht_wrapper.c
@@ -0,0 +1,160 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*----------------------------------------------------------------------------
+ * TYPEDEFS, DEFINITIONS AND MACROS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/* Single CPU system? */
+#if (CONFIG_MAX_PHYSICAL_CPUS == 1)
+ #define HT_BUILD_NC_ONLY 1
+#endif
+
+/* Debugging Options */
+#define AMD_DEBUG 1
+//#define AMD_DEBUG_ERROR_STOP 1
+
+/*----------------------------------------------------------------------------
+ * MODULES USED
+ *
+ *----------------------------------------------------------------------------
+ */
+
+#undef FILECODE
+#define FILECODE 0xFF01
+#include "comlib.h"
+#include "h3gtopo.h"
+#include "h3finit.h"
+
+/* include the main HT source file */
+#include "h3finit.c"
+
+
+/*----------------------------------------------------------------------------
+ * LOCAL FUNCTIONS
+ *
+ *----------------------------------------------------------------------------
+ */
+
+/* FIXME: Find a better place for these pre-ram functions. */
+#define NODE_HT(x) NODE_PCI(x,0)
+#define NODE_MP(x) NODE_PCI(x,1)
+#define NODE_MC(x) NODE_PCI(x,3)
+#define NODE_LC(x) NODE_PCI(x,4)
+static u32 get_nodes(void)
+{
+ device_t dev;
+ u32 nodes;
+
+ dev = PCI_DEV(CBB, CDB, 0);
+ nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) ;
+#if CONFIG_MAX_PHYSICAL_CPUS > 8
+ nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
+#endif
+ nodes++;
+
+ return nodes;
+}
+
+static void enable_apic_ext_id(u32 node)
+{
+ u32 val;
+ val = pci_read_config32(NODE_HT(node), 0x68);
+ val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
+ pci_write_config32(NODE_HT(node), 0x68, val);
+}
+
+
+static void setup_link_trans_cntrl(void)
+{
+ /* FIXME: Not sure that this belongs here but it is HT related */
+ u32 val;
+ val = pci_read_config32(NODE_HT(0), 0x68);
+ val |= 0x00206800; // DSNpReqLimit, LimitCldtCfg, BufRefPri, RespPassPW per BKDG;
+ pci_write_config32(NODE_HT(0), 0x68, val);
+}
+
+
+
+
+/**
+ * void AMD_CB_EventNotify (u8 evtClass, u16 event, const u8 *pEventData0)
+ *
+ * Needs to be fixed to output the debug structures.
+ *
+ */
+void AMD_CB_EventNotify (u8 evtClass, u16 event, const u8 *pEventData0)
+{
+ printk_debug("AMD_CB_EventNotify()\n");
+ printk_debug("event class: %02x event: %04x\n", evtClass, event);
+
+}
+
+/**
+ * void getAmdTopolist(u8 ***p)
+ *
+ * point to the stock topo list array
+ *
+ */
+void getAmdTopolist(u8 ***p)
+{
+ *p = (u8 **)amd_topo_list;
+}
+
+
+/**
+ * void amd_ht_init(struct sys_info *sysinfo)
+ *
+ * AMD HT init LinuxBIOS wrapper
+ *
+ */
+void amd_ht_init(struct sys_info *sysinfo)
+{
+ AMD_HTBLOCK ht_wrapper = {
+ NULL, // u8 **topolist;
+ 0, // u8 AutoBusStart;
+ 32, // u8 AutoBusMax;
+ 6, // u8 AutoBusIncrement;
+ NULL, // BOOL (*AMD_CB_IgnoreLink)();
+ NULL, // BOOL (*AMD_CB_OverrideBusNumbers)();
+ NULL, // BOOL (*AMD_CB_ManualBUIDSwapList)();
+ NULL, // void (*AMD_CB_DeviceCapOverride)();
+ NULL, // void (*AMD_CB_Cpu2CpuPCBLimits)();
+ NULL, // void (*AMD_CB_IOPCBLimits)();
+ NULL, // BOOL (*AMD_CB_SkipRegang)();
+ NULL, // BOOL (*AMD_CB_CustomizeTrafficDistribution)();
+ NULL, // BOOL (*AMD_CB_CustomizeBuffers)();
+ NULL, // void (*AMD_CB_OverrideDevicePort)();
+ NULL, // void (*AMD_CB_OverrideCpuPort)();
+ AMD_CB_EventNotify // void (*AMD_CB_EventNotify) ();
+ };
+
+ printk_debug("Enter amd_ht_init()\n");
+ amdHtInitialize(&ht_wrapper);
+ printk_debug("Exit amd_ht_init()\n");
+
+
+}
+
+
+
+
diff --git a/src/northbridge/amd/amdht/porting.h b/src/northbridge/amd/amdht/porting.h
new file mode 100644
index 0000000000..534e742fd5
--- /dev/null
+++ b/src/northbridge/amd/amdht/porting.h
@@ -0,0 +1,88 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef PORTING_H
+#define PORTING_H
+
+
+/* For AMD64 or 32-bit GCC */
+typedef int int32;
+typedef unsigned int uint32;
+typedef short int16;
+typedef unsigned short uint16;
+typedef signed char int8;
+typedef unsigned char uint8;
+
+/* Create the Boolean type */
+#define TRUE 1
+#define FALSE 0
+typedef unsigned char BOOL;
+
+/* Force tight packing of structures */
+#pragma pack(1)
+
+#define CALLCONV
+
+
+typedef struct _uint64
+{
+ uint32 lo;
+ uint32 hi;
+}uint64;
+
+
+/*
+ * SBDFO - Segment Bus Device Function Offset
+ * 31:28 Segment (4-bits)
+ * 27:20 Bus (8-bits)
+ * 19:15 Device (5-bits)
+ * 14:12 Function(3-bits)
+ * 11:00 Offset (12-bits)
+ */
+typedef uint32 SBDFO;
+
+#define MAKE_SBDFO(seg,bus,dev,fun,off) ((((uint32)(seg))<<28) | (((uint32)(bus))<<20) | \
+ (((uint32)(dev))<<15) | (((uint32)(fun))<<12) | ((uint32)(off)))
+#define SBDFO_SEG(x) (((uint32)(x)>>28) & 0x0F)
+#define SBDFO_BUS(x) (((uint32)(x)>>20) & 0xFF)
+#define SBDFO_DEV(x) (((uint32)(x)>>15) & 0x1F)
+#define SBDFO_FUN(x) (((uint32)(x)>>12) & 0x07)
+#define SBDFO_OFF(x) (((uint32)(x)) & 0xFFF)
+#define ILLEGAL_SBDFO 0xFFFFFFFF
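+
+/* Example: MAKE_SBDFO(0, 0, 24, 0, 0x68) encodes segment 0, bus 0, device 24,
+ * function 0, offset 0x68 as 0x000C0068; SBDFO_DEV() of that value yields 24
+ * and SBDFO_OFF() yields 0x68.
+ */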
+
+void CALLCONV AmdMSRRead(uint32 Address, uint64 *Value);
+void CALLCONV AmdMSRWrite(uint32 Address, uint64 *Value);
+void CALLCONV AmdIORead(uint8 IOSize, uint16 Address, uint32 *Value);
+void CALLCONV AmdIOWrite(uint8 IOSize, uint16 Address, uint32 *Value);
+void CALLCONV AmdMemRead(uint8 MemSize, uint64 *Address, uint32 *Value);
+void CALLCONV AmdMemWrite(uint8 MemSize, uint64 *Address, uint32 *Value);
+void CALLCONV AmdPCIRead(SBDFO loc, uint32 *Value);
+void CALLCONV AmdPCIWrite(SBDFO loc, uint32 *Value);
+void CALLCONV AmdCPUIDRead(uint32 Address, uint32 Regs[4]);
+void CALLCONV ErrorStop(uint32 Value);
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define BYTESIZE 1
+#define WORDSIZE 2
+#define DWORDSIZE 4
+
+#endif /* PORTING_H */
diff --git a/src/northbridge/amd/amdmct/amddefs.h b/src/northbridge/amd/amdmct/amddefs.h
new file mode 100644
index 0000000000..90ab102f73
--- /dev/null
+++ b/src/northbridge/amd/amdmct/amddefs.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Public Revisions - USE THESE VERSIONS TO MAKE COMPARE WITH CPULOGICALID RETURN VALUE*/
+#define AMD_SAFEMODE 0x80000000 /* Unknown future revision - SAFE MODE */
+#define AMD_NPT_F0 0x00000001 /* F0 stepping */
+#define AMD_NPT_F1 0x00000002 /* F1 stepping */
+#define AMD_NPT_F2C 0x00000004
+#define AMD_NPT_F2D 0x00000008
+#define AMD_NPT_F2E 0x00000010 /* F2 stepping E */
+#define AMD_NPT_F2G 0x00000020 /* F2 stepping G */
+#define AMD_NPT_F2J 0x00000040
+#define AMD_NPT_F2K 0x00000080
+#define AMD_NPT_F3L 0x00000100 /* F3 Stepping */
+#define AMD_NPT_G0A 0x00000200 /* G0 stepping */
+#define AMD_NPT_G1B 0x00000400 /* G1 stepping */
+#define AMD_DR_A0A 0x00010000 /* Barcelona A0 */
+#define AMD_DR_A1B 0x00020000 /* Barcelona A1 */
+#define AMD_DR_A2 0x00040000 /* Barcelona A2 */
+#define AMD_DR_B0 0x00080000 /* Barcelona B0 */
+#define AMD_DR_B1 0x00100000 /* Barcelona B1 */
+#define AMD_DR_B2 0x00200000 /* Barcelona B2 */
+#define AMD_DR_BA 0x00400000 /* Barcelona BA */
+
+/*
+ Groups - Create as many as you wish, from the above public values
+*/
+#define AMD_NPT_F2 (AMD_NPT_F2C + AMD_NPT_F2D + AMD_NPT_F2E + AMD_NPT_F2G + AMD_NPT_F2J + AMD_NPT_F2K)
+#define AMD_NPT_F3 (AMD_NPT_F3L)
+#define AMD_NPT_Fx (AMD_NPT_F0 + AMD_NPT_F1 + AMD_NPT_F2 + AMD_NPT_F3)
+#define AMD_NPT_Gx (AMD_NPT_G0A + AMD_NPT_G1B)
+#define AMD_NPT_ALL (AMD_NPT_Fx + AMD_NPT_Gx)
+#define AMD_DR_Ax (AMD_DR_A0A + AMD_DR_A1B + AMD_DR_A2)
+#define AMD_FINEDELAY (AMD_NPT_F0 + AMD_NPT_F1 + AMD_NPT_F2)
+#define AMD_GT_F0	(AMD_NPT_ALL & ~AMD_NPT_F0)
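+
+/* Typical use (a sketch; getLogicalCpuid() is a placeholder for whatever routine
+ * returns the CPULOGICALID value referred to above):
+ *	if (getLogicalCpuid(node) & AMD_DR_Ax)
+ *		... apply a Barcelona A-stepping workaround ...
+ */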
+
+
+#define CPUID_EXT_PM 0x80000007
+
+#define CPUID_MODEL 1
+
+
+#define HWCR 0xC0010015
+
+
+#define FidVidStatus 0xC0010042
+
+
+#define FS_Base 0xC0000100
+
+
+#define BU_CFG 0xC0011023
+#define BU_CFG2 0xC001102A
diff --git a/src/northbridge/amd/amdmct/mct/mct.h b/src/northbridge/amd/amdmct/mct/mct.h
new file mode 100644
index 0000000000..2ddd5da7bc
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mct.h
@@ -0,0 +1,552 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef MCT_H
+#define MCT_H
+/*===========================================================================
+ CPU - K8/FAM10
+===========================================================================*/
+#define PT_L1 0 /* CPU Package Type*/
+#define PT_M2 1
+#define PT_S1 2
+
+#define J_MIN 0 /* j loop constraint. 1=CL 2.0 T*/
+#define J_MAX 4 /* j loop constraint. 4=CL 6.0 T*/
+#define K_MIN		1		/* k loop constraint. 1=200 MHz*/
+#define K_MAX		4		/* k loop constraint. 4=400 MHz*/
+#define CL_DEF 2 /* Default value for failsafe operation. 2=CL 4.0 T*/
+#define T_DEF 1 /* Default value for failsafe operation. 1=5ns (cycle time)*/
+
+#define BSCRate 1 /* reg bit field=rate of dram scrubber for ecc*/
+ /* memory initialization (ecc and check-bits).*/
+ /* 1=40 ns/64 bytes.*/
+#define FirstPass 1 /* First pass through RcvEn training*/
+#define SecondPass 2 /* Second pass through Rcven training*/
+
+#define RCVREN_MARGIN 6 /* number of DLL taps to delay beyond first passing position*/
+#define MAXASYNCLATCTL_3 60 /* Max Async Latency Control value (This value will be divided by 20)*/
+#define DQS_FAIL 1
+#define DQS_PASS 0
+#define DQS_WRITEDIR 0
+#define DQS_READDIR 1
+#define MIN_DQS_WNDW 3
+#define secPassOffset 6
+
+#define PA_HOST (((24 << 3)+0) << 8) /* Node 0 Host Bus function PCI Address bits [15:0] */
+#define PA_MAP (((24 << 3)+1) << 8) /* Node 0 MAP function PCI Address bits [15:0] */
+#define PA_DCT (((24 << 3)+2) << 8) /* Node 0 DCT function PCI Address bits [15:0] */
+#define PA_DCTADDL (((00 << 3)+2) << 8) /* Node x DCT function, Additional Registers PCI Address bits [15:0] */
+#define PA_NBMISC (((24 << 3)+3) << 8) /* Node 0 Misc PCI Address bits [15:0] */
+#define PA_NBDEVOP (((00 << 3)+3) << 8)	/* Node x Misc PCI Address bits [15:0] */
+
+#define DCC_EN 1 /* X:2:0x94[19]*/
+#define ILD_Lmt 3 /* X:2:0x94[18:16]*/
+
+#define EncodedTSPD 0x00191709 /* encodes which SPD byte to get T from*/
+ /* versus CL X, CL X-.5, and CL X-1*/
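+					/* The three low bytes appear to be SPD byte indices
+					 * (0x19 = 25, 0x17 = 23, 0x09 = 9), presumably the
+					 * cycle-time bytes for the successive CAS latency
+					 * candidates named above (informational reading).
+					 */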
+
+#define Bias_TrpT 3 /* bias to convert bus clocks to bit field value*/
+#define Bias_TrrdT 2
+#define Bias_TrcdT 3
+#define Bias_TrasT 3
+#define Bias_TrcT 11
+#define Bias_TrtpT 4
+#define Bias_TwrT 3
+#define Bias_TwtrT 0
+
+#define Min_TrpT 3 /* min programmable value in busclocks*/
+#define Max_TrpT 6 /* max programmable value in busclocks*/
+#define Min_TrrdT 2
+#define Max_TrrdT 5
+#define Min_TrcdT 3
+#define Max_TrcdT 6
+#define Min_TrasT 5
+#define Max_TrasT 18
+#define Min_TrcT 11
+#define Max_TrcT 26
+#define Min_TrtpT 4
+#define Max_TrtpT 5
+#define Min_TwrT 3
+#define Max_TwrT 6
+#define Min_TwtrT 1
+#define Max_TwtrT 3
+
+/* common register bit names */
+#define DramHoleValid 0 /* func 1, offset F0h, bit 0 */
+#define CSEnable 0 /* func 2, offset 40h-5C, bit 0 */
+#define Spare 1 /* func 2, offset 40h-5C, bit 1 */
+#define TestFail 2 /* func 2, offset 40h-5C, bit 2 */
+#define DqsRcvEnTrain 18 /* func 2, offset 78h, bit 18 */
+#define EnDramInit 31 /* func 2, offset 7Ch, bit 31 */
+#define DisAutoRefresh 18 /* func 2, offset 8Ch, bit 18 */
+#define InitDram 0 /* func 2, offset 90h, bit 0 */
+#define BurstLength32 10 /* func 2, offset 90h, bit 10 */
+#define Width128 11 /* func 2, offset 90h, bit 11 */
+#define X4Dimm 12 /* func 2, offset 90h, bit 12 */
+#define UnBuffDimm 16 /* func 2, offset 90h, bit 16 */
+#define DimmEcEn 19 /* func 2, offset 90h, bit 19 */
+#define MemClkFreqVal 3 /* func 2, offset 94h, bit 3 */
+#define RDqsEn 12 /* func 2, offset 94h, bit 12 */
+#define DisDramInterface 14 /* func 2, offset 94h, bit 14 */
+#define DctAccessWrite 30 /* func 2, offset 98h, bit 30 */
+#define DctAccessDone 31 /* func 2, offset 98h, bit 31 */
+#define PwrSavingsEn 10 /* func 2, offset A0h, bit 10 */
+#define Mod64BitMux 4 /* func 2, offset A0h, bit 4 */
+#define DisableJitter 1 /* func 2, offset A0h, bit 1 */
+#define DramEnabled 9 /* func 2, offset A0h, bit 9 */
+#define SyncOnUcEccEn 2 /* fun 3, offset 44h, bit 2 */
+
+/*=============================================================================
+ Jedec DDR II
+=============================================================================*/
+#define SPD_TYPE 2 /* SPD byte read location*/
+ #define JED_DDRSDRAM 0x07 /* Jedec defined bit field*/
+ #define JED_DDR2SDRAM 0x08 /* Jedec defined bit field*/
+
+#define SPD_DIMMTYPE 20
+#define SPD_ATTRIB 21
+ #define JED_DIFCKMSK 0x20 /* Differential Clock Input*/
+ #define JED_REGADCMSK 0x11 /* Registered Address/Control*/
+ #define JED_PROBEMSK 0x40 /* Analysis Probe installed*/
+#define SPD_DEVATTRIB 22
+#define SPD_EDCTYPE 11
+ #define JED_ECC 0x02
+ #define JED_ADRCPAR 0x04
+#define SPD_ROWSZ 3
+#define SPD_COLSZ 4
+#define SPD_LBANKS 17 /* number of [logical] banks on each device*/
+#define SPD_DMBANKS 5 /* number of physical banks on dimm*/
+ #define SPDPLBit 4 /* Dram package bit*/
+#define SPD_BANKSZ 31 /* capacity of physical bank*/
+#define SPD_DEVWIDTH 13
+#define SPD_CASLAT 18
+#define SPD_TRP 27
+#define SPD_TRRD 28
+#define SPD_TRCD 29
+#define SPD_TRAS 30
+#define SPD_TWR 36
+#define SPD_TWTR 37
+#define SPD_TRTP 38
+#define SPD_TRCRFC 40
+#define SPD_TRC 41
+#define SPD_TRFC 42
+
+#define SPD_MANDATEYR 93 /* Module Manufacturing Year (BCD) */
+
+#define SPD_MANDATEWK 94 /* Module Manufacturing Week (BCD) */
+
+/*--------------------------------------
+ Jedec DDR II related equates
+--------------------------------------*/
+#define MYEAR06 6 /* Manufacturing Year BCD encoding of 2006 - 06d*/
+#define MWEEK24 0x24 /* Manufacturing Week BCD encoding of June - 24d*/
+
+/*=============================================================================
+ Macros
+=============================================================================*/
+
+#define _2GB_RJ8 (2<<(30-8))
+#define _4GB_RJ8 (4<<(30-8))
+#define _4GB_RJ4 (4<<(30-4))
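+
+/* The _RJ8/_RJ4 suffixes read as "right-justified by 8/4 bits", i.e. a byte
+ * address expressed in units of 256 or 16 bytes: _2GB_RJ8 = 2 << 22 = 0x00800000,
+ * and 0x00800000 * 256 = 2GB.
+ */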
+
+#define BigPagex8_RJ8 (1<<(17+3-8)) /* 128KB * 8 >> 8 */
+
+/*=============================================================================
+ Global MCT Status Structure
+=============================================================================*/
+struct MCTStatStruc {
+ u32 GStatus; /* Global Status bitfield*/
+ u32 HoleBase; /* If not zero, BASE[39:8] (system address)
+ of sub 4GB dram hole for HW remapping.*/
+ u32 Sub4GCacheTop; /* If not zero, the 32-bit top of cacheable memory.*/
+ u32 SysLimit; /* LIMIT[39:8] (system address)*/
+};
+/*=============================================================================
+ Global MCT Configuration Status Word (GStatus)
+=============================================================================*/
+/*These should begin at bit 0 of GStatus[31:0]*/
+#define GSB_MTRRshort 0 /* Ran out of MTRRs while mapping memory*/
+#define GSB_ECCDIMMs 1 /* All banks of all Nodes are ECC capable*/
+#define GSB_DramECCDis 2 /* Dram ECC requested but not enabled.*/
+#define GSB_SoftHole 3 /* A Node Base gap was created*/
+#define GSB_HWHole 4 /* A HW dram remap was created*/
+#define GSB_NodeIntlv 5 /* Node Memory interleaving was enabled*/
+#define GSB_SpIntRemapHole 16 /* Special condition for Node Interleave and HW remapping*/
+
+
+/*===============================================================================
+ Local DCT Status structure (a structure for each DCT)
+===============================================================================*/
+
+struct DCTStatStruc { /* A per Node structure*/
+ u8 Node_ID; /* Node ID of current controller*/
+ u8 ErrCode; /* Current error condition of Node
+ 0= no error
+ 1= Variance Error, DCT is running but not in an optimal configuration.
+ 2= Stop Error, DCT is NOT running
+ 3= Fatal Error, DCT/MCT initialization has been halted.*/
+ u32 ErrStatus; /* Error Status bit Field */
+ u32 Status; /* Status bit Field*/
+ u8 DIMMAddr[8]; /* SPD address of DIMM controlled by MA0_CS_L[0,1]*/
+ /* SPD address of..MB0_CS_L[0,1]*/
+ /* SPD address of..MA1_CS_L[0,1]*/
+ /* SPD address of..MB1_CS_L[0,1]*/
+ /* SPD address of..MA2_CS_L[0,1]*/
+ /* SPD address of..MB2_CS_L[0,1]*/
+ /* SPD address of..MA3_CS_L[0,1]*/
+ /* SPD address of..MB3_CS_L[0,1]*/
+ u16 DIMMPresent; /* For each bit n 0..7, 1=DIMM n is present.
+ DIMM# Select Signal
+ 0 MA0_CS_L[0,1]
+ 1 MB0_CS_L[0,1]
+ 2 MA1_CS_L[0,1]
+ 3 MB1_CS_L[0,1]
+ 4 MA2_CS_L[0,1]
+ 5 MB2_CS_L[0,1]
+ 6 MA3_CS_L[0,1]
+ 7 MB3_CS_L[0,1]*/
+ u16 DIMMValid; /* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
+ u16 DIMMSPDCSE; /* For each bit n 0..7, 1=DIMM n SPD checksum error*/
+ u16 DimmECCPresent; /* For each bit n 0..7, 1=DIMM n is ECC capable.*/
+ u16 DimmPARPresent; /* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
+ u16 Dimmx4Present; /* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
+ u16 Dimmx8Present; /* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
+ u16 Dimmx16Present; /* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
+ u16 DIMM1Kpage; /* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+ u8 MAload[2]; /* Number of devices loading MAA bus*/
+ /* Number of devices loading MAB bus*/
+ u8 MAdimms[2]; /* Number of DIMMs loading CH A*/
+ /* Number of DIMMs loading CH B*/
+ u8 DATAload[2]; /* Number of ranks loading CH A DATA*/
+ /* Number of ranks loading CH B DATA*/
+ u8 DIMMAutoSpeed; /* Max valid Mfg. Speed of DIMMs
+ 1=200Mhz
+ 2=266Mhz
+ 3=333Mhz
+ 4=400Mhz */
+ u8 DIMMCASL; /* Min valid Mfg. CL bitfield
+ 0=2.0
+ 1=3.0
+ 2=4.0
+ 3=5.0
+ 4=6.0 */
+ u16 DIMMTrcd; /* Minimax Trcd*40 (ns) of DIMMs*/
+ u16 DIMMTrp; /* Minimax Trp*40 (ns) of DIMMs*/
+ u16 DIMMTrtp; /* Minimax Trtp*40 (ns) of DIMMs*/
+ u16 DIMMTras; /* Minimax Tras*40 (ns) of DIMMs*/
+ u16 DIMMTrc; /* Minimax Trc*40 (ns) of DIMMs*/
+ u16 DIMMTwr; /* Minimax Twr*40 (ns) of DIMMs*/
+ u16 DIMMTrrd; /* Minimax Trrd*40 (ns) of DIMMs*/
+ u16 DIMMTwtr; /* Minimax Twtr*40 (ns) of DIMMs*/
+ u8 Speed; /* Bus Speed (to set Controller)
+ 1=200Mhz
+ 2=266Mhz
+ 3=333Mhz
+ 4=400Mhz */
+ u8 CASL; /* CAS latency DCT setting
+ 0=2.0
+ 1=3.0
+ 2=4.0
+ 3=5.0
+ 4=6.0 */
+ u8 Trcd; /* DCT Trcd (busclocks) */
+ u8 Trp; /* DCT Trp (busclocks) */
+ u8 Trtp; /* DCT Trtp (busclocks) */
+ u8 Tras; /* DCT Tras (busclocks) */
+ u8 Trc; /* DCT Trc (busclocks) */
+ u8 Twr; /* DCT Twr (busclocks) */
+ u8 Trrd; /* DCT Trrd (busclocks) */
+ u8 Twtr; /* DCT Twtr (busclocks) */
+ u8 Trfc[4]; /* DCT Logical DIMM0 Trfc
+ 0=75ns (for 256Mb devs)
+ 1=105ns (for 512Mb devs)
+ 2=127.5ns (for 1Gb devs)
+ 3=195ns (for 2Gb devs)
+ 4=327.5ns (for 4Gb devs) */
+ /* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
+ /* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
+ /* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
+ u16 CSPresent; /* For each bit n 0..7, 1=Chip-select n is present */
+ u16 CSTestFail; /* For each bit n 0..7, 1=Chip-select n is present but disabled */
+ u32 DCTSysBase; /* BASE[39:8] (system address) of this Node's DCTs. */
+ u32 DCTHoleBase; /* If not zero, BASE[39:8] (system address) of dram hole for HW remapping. Dram hole exists on this Node's DCTs. */
+ u32 DCTSysLimit; /* LIMIT[39:8] (system address) of this Node's DCTs */
+ u16 PresetmaxFreq; /* Maximum OEM defined DDR frequency
+ 200=200Mhz (DDR400)
+ 266=266Mhz (DDR533)
+ 333=333Mhz (DDR667)
+ 400=400Mhz (DDR800) */
+ u8 _2Tmode; /* 1T or 2T CMD mode (slow access mode)
+ 1=1T
+ 2=2T */
+ u8 TrwtTO; /* DCT TrwtTO (busclocks)*/
+ u8 Twrrd; /* DCT Twrrd (busclocks)*/
+ u8 Twrwr; /* DCT Twrwr (busclocks)*/
+ u8 Trdrd; /* DCT Trdrd (busclocks)*/
+	u32 CH_ODC_CTL[2];	/* Output Driver Strength (see BKDG FN2:Offset 9Ch, index 00h)*/
+	u32 CH_ADDR_TMG[2];	/* Address Bus Timing (see BKDG FN2:Offset 9Ch, index 04h)*/
+				/* Output Driver Strength (see BKDG FN2:Offset 9Ch, index 20h)*/
+				/* Address Bus Timing (see BKDG FN2:Offset 9Ch, index 24h)*/
+ u16 CH_EccDQSLike[2]; /* CHA DQS ECC byte like...*/
+ u8 CH_EccDQSScale[2]; /* CHA DQS ECC byte scale*/
+// u8 reserved_b_1; /* Reserved*/
+ /* CHB DQS ECC byte like...*/
+ /* CHB DQS ECC byte scale*/
+// u8 reserved_b_2; /*Reserved*/
+ u8 MaxAsyncLat; /* Max Asynchronous Latency (ns)*/
+ u8 CH_B_DQS[2][2][9]; /* CHA Byte 0 - 7 and Check Write DQS Delay*/
+ /* Reserved*/
+ /* CHA Byte 0 - 7 and Check Read DQS Delay*/
+ /* Reserved*/
+ /* CHB Byte 0 - 7 and Check Write DQS Delay*/
+ /* Reserved*/
+ /* CHB Byte 0 - 7 and Check Read DQS Delay*/
+ /* Reserved*/
+ u8 CH_D_RCVRDLY[2][4]; /* CHA DIMM 0 - 3 Receiver Enable Delay*/
+ /* CHB DIMM 0 - 3 Receiver Enable Delay*/
+ u32 PtrPatternBufA; /* Ptr on stack to aligned DQS testing pattern*/
+ u32 PtrPatternBufB; /*Ptr on stack to aligned DQS testing pattern*/
+ u8 Channel; /* Current Channel (0= CH A, 1=CH B)*/
+ u8 ByteLane; /* Current Byte Lane (0..7)*/
+ u8 Direction; /* Current DQS-DQ training write direction (0=read, 1=write)*/
+ u8 Pattern; /* Current pattern*/
+ u8 DQSDelay; /* Current DQS delay value*/
+ u32 TrainErrors; /* Current Training Errors*/
+// u8 reserved_b_3; /* RSVD */
+ u32 AMC_TSC_DeltaLo; /* Time Stamp Counter measurement of AMC, Low dword*/
+ u32 AMC_TSC_DeltaHi; /* Time Stamp Counter measurement of AMC, High dword*/
+ u8 CH_B_Dly[2][2][2][8]; /* CH A byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ u32 LogicalCPUID; /* The logical CPUID of the node*/
+ u16 HostBiosSrvc1; /* Word sized general purpose field for use by host BIOS. Scratch space.*/
+ u32 HostBiosSrvc2; /* Dword sized general purpose field for use by host BIOS. Scratch space.*/
+ u16 DimmQRPresent; /* QuadRank DIMM present?*/
+ u16 DimmTrainFail; /* Bitmap showing which dimms failed training*/
+ u16 CSTrainFail; /* Bitmap showing which chipselects failed training*/
+	u16 DimmYr06;		/* Bitmap indicating which Dimms have a manufacturer's year code <= 2006*/
+	u16 DimmWk2406;		/* Bitmap indicating which Dimms have a manufacturer's week code <= 24 of 2006 (June)*/
+ u16 DimmDRPresent; /* Bitmap indicating that Dual Rank Dimms are present*/
+ u16 DimmPlPresent; /* Bitmap indicating that Planar (1) or Stacked (0) Dimms are present.*/
+	u16 ChannelTrainFail;	/* Bitmap showing the channel information about failed Chip Selects*/
+ /* 0 in any bit field indicates Channel 0*/
+ /* 1 in any bit field indicates Channel 1*/
+};
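+
+/* Usage sketch (illustrative, not part of the structure): the per-DIMM fields
+ * above are bitmaps indexed by physical DIMM number 0..7, so tests take the
+ * form
+ *
+ *	if (pDCTstat->DIMMValid & (1 << i)) {
+ *		... DIMM i is valid; its ECC capability is
+ *		... (pDCTstat->DimmECCPresent & (1 << i))
+ *	}
+ *
+ * as the per-DIMM loops in mct_d.c do.
+ */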
+
+/*===============================================================================
+ Local Error Status Codes (DCTStatStruc.ErrCode)
+===============================================================================*/
+#define SC_RunningOK 0
+#define SC_VarianceErr 1 /* Running non-optimally*/
+#define SC_StopError 2 /* Not Running*/
+#define SC_FatalErr 3 /* Fatal Error, MCTB has exited immediately*/
+
+/*===============================================================================
+ Local Error Status (DCTStatStruc.ErrStatus[31:0])
+ ===============================================================================*/
+#define SB_NoDimms 0
+#define SB_DIMMChkSum 1
+#define SB_DimmMismatchM	2	/* dimm module type (buffer) mismatch*/
+#define SB_DimmMismatchT 3 /* dimm CL/T mismatch*/
+#define SB_DimmMismatchO 4 /* dimm organization mismatch (128-bit)*/
+#define SB_NoTrcTrfc 5 /* SPD missing Trc or Trfc info*/
+#define SB_NoCycTime 6 /* SPD missing byte 23 or 25*/
+#define SB_BkIntDis 7 /* Bank interleave requested but not enabled*/
+#define SB_DramECCDis 8 /* Dram ECC requested but not enabled*/
+#define SB_SpareDis 9 /* Online spare requested but not enabled*/
+#define SB_MinimumMode 10 /* Running in Minimum Mode*/
+#define SB_NORCVREN 11 /* No DQS Receiver Enable pass window found*/
+#define SB_CHA2BRCVREN 12 /* DQS Rcvr En pass window CHA to CH B too large*/
+#define SB_SmallRCVR 13 /* DQS Rcvr En pass window too small (far right of dynamic range)*/
+#define SB_NODQSPOS 14 /* No DQS-DQ passing positions*/
+#define SB_SMALLDQS 15 /* DQS-DQ passing window too small*/
+
+/*===============================================================================
+ Local Configuration Status (DCTStatStruc.Status[31:0])
+===============================================================================*/
+#define SB_Registered 0 /* All DIMMs are Registered*/
+#define SB_ECCDIMMs 1 /* All banks ECC capable*/
+#define SB_PARDIMMs 2 /* All banks Addr/CMD Parity capable*/
+#define SB_DiagClks 3 /* Jedec ALL slots clock enable diag mode*/
+#define SB_128bitmode 4 /* DCT in 128-bit mode operation*/
+#define SB_64MuxedMode 5 /* DCT in 64-bit mux'ed mode.*/
+#define SB_2TMode 6 /* 2T CMD timing mode is enabled.*/
+#define SB_SWNodeHole 7 /* Remapping of Node Base on this Node to create a gap.*/
+#define SB_HWHole 8 /* Memory Hole created on this Node using HW remapping.*/
+
+
+
+/*===============================================================================
+ NVRAM/run-time-configurable Items
+===============================================================================*/
+/* Platform Configuration */
+#define NV_PACK_TYPE 0 /* CPU Package Type (2-bits)
+ 0=NPT L1
+ 1=NPT M2
+ 2=NPT S1*/
+#define NV_MAX_NODES 1 /* Number of Nodes/Sockets (4-bits)*/
+#define NV_MAX_DIMMS 2 /* Number of DIMM slots for the specified Node ID (4-bits)*/
+#define NV_MAX_MEMCLK 3 /* Maximum platform demonstrated Memclock (10-bits)
+ 200=200Mhz (DDR400)
+ 266=266Mhz (DDR533)
+ 333=333Mhz (DDR667)
+ 400=400Mhz (DDR800)*/
+#define NV_ECC_CAP 4 /* Bus ECC capable (1-bits)
+ 0=Platform not capable
+ 1=Platform is capable*/
+#define NV_4RANKType 5 /* Quad Rank DIMM slot type (2-bits)
+ 0=Normal
+ 1=R4 (4-Rank Registered DIMMs in AMD server configuration)
+ 2=S4 (Unbuffered SO-DIMMs)*/
+#define NV_BYPMAX 6 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
+ 4=4 times bypass (normal for non-UMA systems)
+ 7=7 times bypass (normal for UMA systems)*/
+#define NV_RDWRQBYP 7 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
+ 2=8 times (normal for non-UMA systems)
+ 3=16 times (normal for UMA systems)*/
+
+
+/* Dram Timing */
+#define NV_MCTUSRTMGMODE 10 /* User Memclock Mode (2-bits)
+ 0=Auto, no user limit
+ 1=Auto, user limit provided in NV_MemCkVal
+ 2=Manual, user value provided in NV_MemCkVal*/
+#define NV_MemCkVal 11 /* Memory Clock Value (2-bits)
+ 0=200Mhz
+ 1=266Mhz
+ 2=333Mhz
+ 3=400Mhz*/
+
+/* Dram Configuration */
+#define NV_BankIntlv 20 /* Dram Bank (chip-select) Interleaving (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_AllMemClks 21 /* Turn on All DIMM clocks (1-bits)
+ 0=normal
+ 1=enable all memclocks*/
+#define NV_SPDCHK_RESTRT 22 /* SPD Check control bitmap (1-bits)
+ 0=Exit current node init if any DIMM has SPD checksum error
+ 1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+#define NV_DQSTrainCTL 23 /* DQS Signal Timing Training Control
+ 0=skip DQS training
+ 1=perform DQS training*/
+#define NV_NodeIntlv 24 /* Node Memory Interleaving (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_BurstLen32 25 /* burstLength32 for 64-bit mode (1-bits)
+ 0=disable (normal)
+ 1=enable (4 beat burst when width is 64-bits)*/
+
+/* Dram Power */
+#define NV_CKE_PDEN 30 /* CKE based power down mode (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_CKE_CTL 31 /* CKE based power down control (1-bits)
+ 0=per Channel control
+ 1=per Chip select control*/
+#define NV_CLKHZAltVidC3 32 /* Memclock tri-stating during C3 and Alt VID (1-bits)
+ 0=disable
+ 1=enable*/
+
+/* Memory Map/Mgt.*/
+#define NV_BottomIO 40 /* Bottom of 32-bit IO space (8-bits)
+ NV_BottomIO[7:0]=Addr[31:24]*/
+#define NV_BottomUMA 41 /* Bottom of shared graphics dram (8-bits)
+ NV_BottomUMA[7:0]=Addr[31:24]*/
+#define NV_MemHole 42 /* Memory Hole Remapping (1-bits)
+ 0=disable
+ 1=enable */
+
+/* ECC */
+#define NV_ECC 50 /* Dram ECC enable*/
+#define NV_NBECC 52 /* ECC MCE enable*/
+#define NV_ChipKill 53 /* Chip-Kill ECC Mode enable*/
+#define NV_ECCRedir 54 /* Dram ECC Redirection enable*/
+#define NV_DramBKScrub 55 /* Dram ECC Background Scrubber CTL*/
+#define NV_L2BKScrub 56 /* L2 ECC Background Scrubber CTL*/
+#define NV_DCBKScrub 57 /* DCache ECC Background Scrubber CTL*/
+#define NV_CS_SpareCTL 58 /* Chip Select Spare Control bit 0:
+ 0=disable Spare
+ 1=enable Spare */
+ /*Chip Select Spare Control bit 1-4:
+ Reserved, must be zero*/
+#define NV_Parity 60 /* Parity Enable*/
+#define NV_SyncOnUnEccEn 61 /* SyncOnUnEccEn control
+ 0=disable
+ 1=enable*/
+
+#ifndef MAX_NODES_SUPPORTED
+#define MAX_NODES_SUPPORTED 8
+#endif
+
+#ifndef MAX_DIMMS_SUPPORTED
+#define MAX_DIMMS_SUPPORTED 8
+#endif
+
+#ifndef MAX_CS_SUPPORTED
+#define MAX_CS_SUPPORTED 8
+#endif
+
+
+// global function
+u32 NodePresent(u32 Node);
+u32 Get_NB32n(struct DCTStatStruc *pDCTstat, u32 addrx);
+u32 Get_NB32(u32 addr); /* NOTE: extend addr to 32 bit for bus > 0 */
+u32 mctGetLogicalCPUID(u32 Node);
+
+void K8FInterleaveBanks(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+
+void mctInitWithWritetoCS(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+
+void mctGet_PS_Cfg(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+void Get_ChannelPS_Cfg0( unsigned MAAdimms, unsigned Speed, unsigned MAAload, unsigned DATAAload,
+ unsigned *AddrTmgCTL, unsigned *ODC_CTL);
+void Get_ChannelPS_Cfg1( unsigned MAAdimms, unsigned Speed, unsigned MAAload,
+ unsigned *AddrTmgCTL, unsigned *ODC_CTL, unsigned *val);
+void Get_ChannelPS_Cfg2( unsigned MAAdimms, unsigned Speed, unsigned MAAload,
+ unsigned *AddrTmgCTL, unsigned *ODC_CTL, unsigned *val);
+
+u8 MCTDefRet(void);
+
+u32 Get_RcvrSysAddr(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 channel, u8 receiver, u8 *valid);
+u32 Get_MCTSysAddr(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 channel, u8 chipsel, u8 *valid);
+void K8FTrainReceiverEn(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA, u8 pass);
+void K8FTrainDQSPos(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+u32 SetUpperFSbase(u32 addr_hi);
+
+
+void K8FECCInit(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+
+unsigned amd_FD_support(void);
+void amd_MCTInit(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+
+void K8FCPUMemTyping(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void K8FCPUMemTyping_clear(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+
+void K8FWaitMemClrDelay(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+unsigned K8FCalcFinalDQSRcvValue(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, unsigned LeftRcvEn, unsigned RightRcvEn, unsigned *valid);
+
+void K8FGetDeltaTSCPart1(struct DCTStatStruc *pDCTstat);
+void K8FGetDeltaTSCPart2(struct DCTStatStruc *pDCTstat);
+#endif
diff --git a/src/northbridge/amd/amdmct/mct/mct_d.c b/src/northbridge/amd/amdmct/mct/mct_d.c
new file mode 100644
index 0000000000..b4a5fdcf1f
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mct_d.c
@@ -0,0 +1,3862 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Description: Main memory controller system configuration for DDR 2 */
+
+
+/* KNOWN ISSUES - ERRATA
+ *
+ * Trtp is not calculated correctly when the controller is in 64-bit mode; it
+ * is 1 busclock off. No fix planned. The controller is not ordinarily in
+ * 64-bit mode.
+ *
+ * 32 Byte burst not supported. No fix planned. The controller is not
+ * ordinarily in 64-bit mode.
+ *
+ * Trc precision does not use the extra Jedec-defined fractional component.
+ * Instead Trc (coarse) is rounded up to the nearest 1 ns.
+ *
+ * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
+ * supported.
+ */
+
+static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static u8 NodePresent_D(u8 Node);
+static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 Get_DefTrc_k_D(u8 k);
+static u16 Get_40Tk_D(u8 k);
+static u16 Get_Fk_D(u8 k);
+static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
+static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, int j, int k);
+static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
+static void mct_initDCT(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_DramInit(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
+static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
+ u32 dev, u32 index_reg, u32 index);
+static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
+ u32 dev, u32 index_reg);
+static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
+ u32 dev, u32 index_reg);
+static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
+ u32 dev, u32 index_reg, u32 index);
+static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
+ u32 dev, u32 index_reg, u32 index);
+static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_init(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void SetCSTriState(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void SetODTTriState(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static u32 mct_NodePresent_D(void);
+static void WaitRoutine_D(u32 time);
+static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+
+
+/*See mctAutoInitMCT header for index relationships to CL and T*/
+static const u16 Table_F_k[] = {00,200,266,333,400,533 };
+static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 };
+static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 };
+static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A };
+static const u16 Tab_40T_k[] = {00,200,150,120,100,75 };
+static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0};
+static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17};
+static const u8 Tab_tCL_j[] = {0,2,3,4,5};
+static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20};
+static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24};
+static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8};
+static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};
+static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
+static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};
+static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
+static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
+static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
+
+void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ /*
+ * Memory may be mapped contiguously all the way up to 4GB (depending
+	 * on setup options). It is the responsibility of the PCI subsystem to
+	 * create an uncacheable IO region below 4GB and to adjust TOP_MEM
+	 * downward prior to any IO mapping or accesses. The CPU sub-system
+	 * has the same responsibility prior to accessing the LAPIC.
+	 *
+	 * Slot Number is an external convention, and is determined by the OEM
+	 * with accompanying silk screening. The OEM may choose to use a Slot
+	 * number convention which is consistent with DIMM number conventions.
+	 * All AMD engineering platforms do.
+ *
+ * Run-Time Requirements:
+ * 1. Complete Hypertransport Bus Configuration
+ * 2. SMBus Controller Initialized
+ * 3. Checksummed or Valid NVRAM bits
+ * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
+ * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
+ * entry
+ * 6. All var MTRRs reset to zero
+ * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
+ * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
+ * 9. All cHT links at max Speed/Width (unless DQS training is not
+ * installed).
+ *
+ *
+ * Global relationship between index values and item values:
+ * j CL(j) k F(k)
+ * --------------------------
+ * 0 2.0 - -
+ * 1 3.0 1 200 Mhz
+ * 2 4.0 2 266 Mhz
+ * 3 5.0 3 333 Mhz
+ * 4 6.0 4 400 Mhz
+ * 5 7.0 5 533 Mhz
+ */
+ u8 Node, NodesWmem;
+ u32 node_sys_base;
+
+restartinit:
+ mctInitMemGPIOs_A_D(); /* Set any required GPIOs*/
+ NodesWmem = 0;
+ node_sys_base = 0;
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ pDCTstat->Node_ID = Node;
+ pDCTstat->dev_host = PA_HOST(Node);
+ pDCTstat->dev_map = PA_MAP(Node);
+ pDCTstat->dev_dct = PA_DCT(Node);
+ pDCTstat->dev_nbmisc = PA_NBMISC(Node);
+ pDCTstat->NodeSysBase = node_sys_base;
+
+ print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
+ mct_init(pMCTstat, pDCTstat);
+ mctNodeIDDebugPort_D();
+ pDCTstat->NodePresent = NodePresent_D(Node);
+ if (pDCTstat->NodePresent) { /* See if Node is there*/
+ print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
+ clear_legacy_Mode(pMCTstat, pDCTstat);
+ pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
+
+ print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
+ mct_InitialMCT_D(pMCTstat, pDCTstat);
+
+ print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
+ mctSMBhub_Init(Node); /* Switch SMBUS crossbar to proper node*/
+
+ print_t("mctAutoInitMCT_D: mct_initDCT\n");
+ mct_initDCT(pMCTstat, pDCTstat);
+ if (pDCTstat->ErrCode == SC_FatalErr) {
+ goto fatalexit; /* any fatal errors?*/
+ } else if (pDCTstat->ErrCode < SC_StopError) {
+ NodesWmem++;
+ }
+ } /* if Node present */
+ node_sys_base = pDCTstat->NodeSysBase;
+ node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
+ }
+ if (NodesWmem == 0) {
+ print_debug("No Nodes?!\n");
+ goto fatalexit;
+ }
+
+ print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
+ SyncDCTsReady_D(pMCTstat, pDCTstatA); /* Make sure DCTs are ready for accesses.*/
+
+ print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
+ HTMemMapInit_D(pMCTstat, pDCTstatA); /* Map local memory into system address space.*/
+ mctHookAfterHTMap();
+
+ print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
+ CPUMemTyping_D(pMCTstat, pDCTstatA); /* Map dram into WB/UC CPU cacheability */
+ mctHookAfterCPU(); /* Setup external northbridge(s) */
+
+ print_t("mctAutoInitMCT_D: DQSTiming_D\n");
+ DQSTiming_D(pMCTstat, pDCTstatA); /* Get Receiver Enable and DQS signal timing*/
+
+ print_t("mctAutoInitMCT_D: :OtherTiming\n");
+ mct_OtherTiming(pMCTstat, pDCTstatA);
+
+ if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
+ goto restartinit;
+ }
+
+ InterleaveNodes_D(pMCTstat, pDCTstatA);
+ InterleaveChannels_D(pMCTstat, pDCTstatA);
+
+ print_t("mctAutoInitMCT_D: ECCInit_D\n");
+ if (ECCInit_D(pMCTstat, pDCTstatA)) { /* Setup ECC control and ECC check-bits*/
+ print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
+ MCTMemClr_D(pMCTstat,pDCTstatA);
+ }
+
+ mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) ); // Node 0
+ print_t("All Done\n");
+ return;
+
+fatalexit:
+ die("mct_d: fatalexit");
+}
+
+
+static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 ret;
+
+ if (mctGet_NVbits(NV_CS_SpareCTL)) {
+ if (MCT_DIMM_SPARE_NO_WARM) {
+			/* Handle DIMM spare without a warm reset */
+ if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
+ LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
+ ret = 0;
+ } else {
+ mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
+ pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
+ ret = 1;
+ }
+ } else {
+			/* Handle DIMM spare with a warm reset */
+ if (mctGet_NVbits(NV_DQSTrainCTL))
+ mctWarmReset_D();
+ ret = 0;
+ }
+
+
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+
+static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 nv_DQSTrainCTL;
+
+ if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
+ return;
+ }
+ nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
+ /* FIXME: BOZO- DQS training every time*/
+ nv_DQSTrainCTL = 1;
+
+ if (nv_DQSTrainCTL) {
+ print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
+		mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
+ phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
+ mctHookBeforeAnyTraining();
+
+ print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
+ TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
+
+ print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
+ mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
+
+ // Second Pass never used for Barcelona!
+ //print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
+ //TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
+
+ print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
+ mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
+
+ print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
+//FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
+ mctHookAfterAnyTraining();
+ mctSaveDQSSigTmg_D();
+
+ print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
+ mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
+
+ print_t("DQSTiming_D: MCTMemClr_D\n");
+ MCTMemClr_D(pMCTstat, pDCTstatA);
+ } else {
+ mctGetDQSSigTmg_D(); /* get values into data structure */
+ LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA); /* load values into registers.*/
+ //mctDoWarmResetMemClr_D();
+ MCTMemClr_D(pMCTstat, pDCTstatA);
+ }
+}
+
+
+static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node, Receiver, Channel, Dir, DIMM;
+ u32 dev;
+ u32 index_reg;
+ u32 reg;
+ u32 index;
+ u32 val;
+
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if (pDCTstat->DCTSysLimit) {
+ dev = pDCTstat->dev_dct;
+ for (Channel = 0;Channel < 2; Channel++) {
+ /* there are four receiver pairs,
+ loosely associated with chipselects.*/
+ index_reg = 0x98 + Channel * 0x100;
+ for (Receiver = 0; Receiver < 8; Receiver += 2) {
+ /* Set Receiver Enable Values */
+ mct_SetRcvrEnDly_D(pDCTstat,
+ 0, /* RcvrEnDly */
+ 1, /* FinalValue, From stack */
+ Channel,
+ Receiver,
+ dev, index_reg,
+ (Receiver >> 1) * 3 + 0x10, /* Addl_Index */
+ 2); /* Pass Second Pass ? */
+
+ }
+ }
+ for (Channel = 0; Channel<2; Channel++) {
+ SetEccDQSRcvrEn_D(pDCTstat, Channel);
+ }
+
+ for (Channel = 0; Channel < 2; Channel++) {
+ u8 *p;
+ index_reg = 0x98 + Channel * 0x100;
+
+				/* NOTE:
+				 * At 400, 533, and 667, DIMM0/1/2/3 are
+				 * supported; setting the config for DIMM0
+				 * makes HW copy it to DIMM1/2/3, and setting
+				 * it for DIMM1 makes HW copy it to DIMM3.
+				 * Rev A/B only support DIMM0/1 at 800Mhz and
+				 * above (+0x100 to the next DIMM).
+				 * Rev C supports DIMM0/1/2/3 at 800Mhz and
+				 * above (+0x100 to the next DIMM).
+				 */
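+				/* Index arithmetic traced from the loop below
+				 * (illustrative): each Dir pass writes three
+				 * consecutive indices (index+1..index+3) and
+				 * then advances index by 4, so DIMM0 uses
+				 * indices 1-3 and 5-7; DIMM1 (only when
+				 * Speed >= 4) starts at 0x100 and follows the
+				 * same pattern.
+				 */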
+ for (DIMM = 0; DIMM < 2; DIMM++) {
+ if (DIMM==0) {
+ index = 0; /* CHA Write Data Timing Low */
+ } else {
+ if (pDCTstat->Speed >= 4) {
+ index = 0x100 * DIMM;
+ } else {
+ break;
+ }
+ }
+ for (Dir=0;Dir<2;Dir++) {//RD/WR
+ p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
+ val = stream_to_int(p); /* CHA Read Data Timing High */
+ Set_NB32_index_wait(dev, index_reg, index+1, val);
+ val = stream_to_int(p+4); /* CHA Write Data Timing High */
+ Set_NB32_index_wait(dev, index_reg, index+2, val);
+ val = *(p+8); /* CHA Write ECC Timing */
+ Set_NB32_index_wait(dev, index_reg, index+3, val);
+ index += 4;
+ }
+ }
+ }
+
+ for (Channel = 0; Channel<2; Channel++) {
+ reg = 0x78 + Channel * 0x100;
+ val = Get_NB32(dev, reg);
+ val &= ~(0x3ff<<22);
+ val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
+ val &= ~(1<<DqsRcvEnTrain);
+ Set_NB32(dev, reg, val); /* program MaxRdLatency to correspond with current delay*/
+ }
+ }
+ }
+}
+
+
+static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ /* Clear MC4_STS for all Nodes in the system. This is required in some
+ * circumstances to clear left over garbage from cold reset, shutdown,
+ * or normal ECC memory conditioning.
+ */
+
+ //FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
+
+ u32 dev;
+ u8 Node;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if (pDCTstat->NodePresent) {
+ dev = pDCTstat->dev_nbmisc;
+ /*MCA NB Status Low (alias to MC4_STS[31:0] */
+ Set_NB32(dev, 0x48, 0);
+ /* MCA NB Status High (alias to MC4_STS[63:32] */
+ Set_NB32(dev, 0x4C, 0);
+ }
+ }
+}
+
+
+static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u32 NextBase, BottomIO;
+ u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
+ u32 HoleSize, DramSelBaseAddr;
+
+ u32 val;
+ u32 base;
+ u32 limit;
+ u32 dev;
+ struct DCTStatStruc *pDCTstat;
+
+ _MemHoleRemap = mctGet_NVbits(NV_MemHole);
+
+ if (pMCTstat->HoleBase == 0) {
+ DramHoleBase = mctGet_NVbits(NV_BottomIO);
+ } else {
+ DramHoleBase = pMCTstat->HoleBase >> (24-8);
+ }
+
+ BottomIO = DramHoleBase << (24-8);
+
+ NextBase = 0;
+ pDCTstat = pDCTstatA + 0;
+ dev = pDCTstat->dev_map;
+
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ DramSelBaseAddr = 0;
+ pDCTstat = pDCTstatA + Node;
+ if (!pDCTstat->GangedMode) {
+ DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
+ /*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
+ val = pDCTstat->NodeSysLimit;
+ if ((val & 0xFF) == 0xFE) {
+ DramSelBaseAddr++;
+ val++;
+ }
+ pDCTstat->DCTSysLimit = val;
+ }
+
+ base = pDCTstat->DCTSysBase;
+ limit = pDCTstat->DCTSysLimit;
+ if (limit > base) {
+ base += NextBase;
+ limit += NextBase;
+ DramSelBaseAddr += NextBase;
+ printk_debug(" Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
+
+ if (_MemHoleRemap) {
+ if ((base < BottomIO) && (limit >= BottomIO)) {
+ /* HW Dram Remap */
+ pDCTstat->Status |= 1 << SB_HWHole;
+ pMCTstat->GStatus |= 1 << GSB_HWHole;
+ pDCTstat->DCTSysBase = base;
+ pDCTstat->DCTSysLimit = limit;
+ pDCTstat->DCTHoleBase = BottomIO;
+ pMCTstat->HoleBase = BottomIO;
+ HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
+ if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
+ base = DramSelBaseAddr;
+ val = ((base + HoleSize) >> (24-8)) & 0xFF;
+ DramHoleOffset = val;
+ val <<= 8; /* shl 16, rol 24 */
+ val |= DramHoleBase << 24;
+ val |= 1 << DramHoleValid;
+ Set_NB32(dev, 0xF0, val); /*Dram Hole Address Register*/
+ pDCTstat->DCTSysLimit += HoleSize;
+ base = pDCTstat->DCTSysBase;
+ limit = pDCTstat->DCTSysLimit;
+ } else if (base == BottomIO) {
+ /* SW Node Hoist */
+ pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
+ pDCTstat->Status |= 1<<SB_SWNodeHole;
+ pMCTstat->GStatus |= 1<<GSB_SoftHole;
+ pMCTstat->HoleBase = base;
+ limit -= base;
+ base = _4GB_RJ8;
+ limit += base;
+ pDCTstat->DCTSysBase = base;
+ pDCTstat->DCTSysLimit = limit;
+ } else {
+ /* No Remapping. Normal Contiguous mapping */
+ pDCTstat->DCTSysBase = base;
+ pDCTstat->DCTSysLimit = limit;
+ }
+ } else {
+ /*No Remapping. Normal Contiguous mapping*/
+ pDCTstat->DCTSysBase = base;
+ pDCTstat->DCTSysLimit = limit;
+ }
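+			/* Worked example of the HW remap branch above, assuming
+			 * NV_BottomIO = 0xE0 (3.5GB) and a node based at 0:
+			 *	BottomIO = 0xE0 << 16 = 0xE00000 (Addr[39:8])
+			 *	HoleSize = _4GB_RJ8 - BottomIO = 0x200000 (512MB)
+			 *	DramHoleOffset = (0x200000 >> 16) & 0xFF = 0x20
+			 *	F1xF0 value = (0xE0 << 24) | (0x20 << 8) | (1 << DramHoleValid)
+			 * and DCTSysLimit grows by HoleSize, so the dram behind
+			 * the hole is re-addressed contiguously above 4GB.
+			 */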
+ base |= 3; /* set WE,RE fields*/
+ pMCTstat->SysLimit = limit;
+ }
+ Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
+ val = limit & 0xffff0000;
+ val |= Node; /* set DstNode*/
+ Set_NB32(dev, 0x44 + (Node << 3), val); /* set DstNode */
+
+ limit = pDCTstat->DCTSysLimit;
+ if (limit) {
+ NextBase = (limit & 0xffff0000) + 0x10000;
+ }
+ }
+
+ /* Copy dram map from Node 0 to Node 1-7 */
+ for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+ u32 reg;
+ u32 devx = pDCTstat->dev_map;
+
+ if (pDCTstat->NodePresent) {
+ printk_debug(" Copy dram map from Node 0 to Node %02x \n", Node);
+ reg = 0x40; /*Dram Base 0*/
+ do {
+ val = Get_NB32(dev, reg);
+ Set_NB32(devx, reg, val);
+ reg += 4;
+ } while ( reg < 0x80);
+ } else {
+ break; /* stop at first absent Node */
+ }
+ }
+
+ /*Copy dram map to F1x120/124*/
+ mct_HTMemMapExt(pMCTstat, pDCTstatA);
+}
+
+
+static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+
+	/* Initiates a memory clear operation on all nodes. The mem clr
+	 * is done in parallel. After the memclr is complete, the status of
+	 * all processors is checked to ensure that memclr has completed.
+ */
+ u8 Node;
+ struct DCTStatStruc *pDCTstat;
+
+ if (!mctGet_NVbits(NV_DQSTrainCTL)){
+ // FIXME: callback to wrapper: mctDoWarmResetMemClr_D
+ } else { // NV_DQSTrainCTL == 1
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+
+ if (pDCTstat->NodePresent) {
+ DCTMemClr_Init_D(pMCTstat, pDCTstat);
+ }
+ }
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+
+ if (pDCTstat->NodePresent) {
+ DCTMemClr_Sync_D(pMCTstat, pDCTstat);
+ }
+ }
+ }
+}
+
+
+static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 val;
+ u32 dev;
+ u32 reg;
+
+ /* Initiates a memory clear operation on one node */
+ if (pDCTstat->DCTSysLimit) {
+ dev = pDCTstat->dev_dct;
+ reg = 0x110;
+
+ do {
+ val = Get_NB32(dev, reg);
+ } while (val & (1 << MemClrBusy));
+
+ val |= (1 << MemClrInit);
+ Set_NB32(dev, reg, val);
+
+ }
+}
+
+
+static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+	/* Ensures that memory clear has completed on all nodes.*/
+ u8 Node;
+ struct DCTStatStruc *pDCTstat;
+
+ if (!mctGet_NVbits(NV_DQSTrainCTL)){
+ // callback to wrapper: mctDoWarmResetMemClr_D
+ } else { // NV_DQSTrainCTL == 1
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+
+ if (pDCTstat->NodePresent) {
+ DCTMemClr_Sync_D(pMCTstat, pDCTstat);
+ }
+ }
+ }
+}
+
+
+static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 val;
+ u32 dev = pDCTstat->dev_dct;
+ u32 reg;
+
+ /* Ensure that a memory clear operation has completed on one node */
+ if (pDCTstat->DCTSysLimit){
+ reg = 0x110;
+
+ do {
+ val = Get_NB32(dev, reg);
+ } while (val & (1 << MemClrBusy));
+
+ do {
+ val = Get_NB32(dev, reg);
+ } while (!(val & (1 << Dr_MemClrStatus)));
+ }
+
+ val = 0x0FE40FC0; // BKDG recommended
+ val |= MCCH_FlushWrOnStpGnt; // Set for S3
+ Set_NB32(dev, 0x11C, val);
+}
+
+
+static u8 NodePresent_D(u8 Node)
+{
+ /*
+ * Determine if a single Hammer Node exists within the network.
+ */
+
+ u32 dev;
+ u32 val;
+ u32 dword;
+ u8 ret = 0;
+
+ dev = PA_HOST(Node); /*test device/vendor id at host bridge */
+ val = Get_NB32(dev, 0);
+ dword = mct_NodePresent_D(); /* FIXME: BOZO -11001022h rev for F */
+ if (val == dword) { /* AMD Hammer Family CPU HT Configuration */
+ if (oemNodePresent_D(Node, &ret))
+ goto finish;
+ /* Node ID register */
+ val = Get_NB32(dev, 0x60);
+ val &= 0x07;
+ dword = Node;
+ if (val == dword) /* current nodeID = requested nodeID ? */
+ ret = 1;
+finish:
+ ;
+ }
+
+ return ret;
+}
+
+
+static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /*
+ * Initialize DRAM on single Athlon 64/Opteron Node.
+ */
+
+ u8 stopDCTflag;
+ u32 val;
+
+ ClearDCT_D(pMCTstat, pDCTstat, dct);
+ stopDCTflag = 1; /*preload flag with 'disable' */
+ if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
+ print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
+ if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
+ print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
+ if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
+ print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
+ if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
+ print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
+ if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
+ print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
+ stopDCTflag = 0;
+ if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
+ print_t("\t\tDCTInit_D: StartupDCT_D\n");
+ StartupDCT_D(pMCTstat, pDCTstat, dct); /*yeaahhh! */
+ }
+ }
+ }
+ }
+ }
+ }
+ if (stopDCTflag) {
+ u32 reg_off = dct * 0x100;
+ val = 1<<DisDramInterface;
+ Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
+ /*To maximize power savings when DisDramInterface=1b,
+ all of the MemClkDis bits should also be set.*/
+ val = 0xFF000000;
+ Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
+ }
+}
+
+
+static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ /* Wait (and block further access to dram) for all DCTs to be ready,
+ * by polling all InitDram bits and waiting for possible memory clear
+ * operations to be complete. Read MemClkFreqVal bit to see if
+ * the DIMMs are present in this node.
+ */
+
+ u8 Node;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ mct_SyncDCTsReady(pDCTstat);
+ }
+}
+
+
+static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
+ * If the DIMMs are present then set the DRAM Enable bit for this node.
+ *
+ * Setting dram init starts up the DCT state machine, initializes the
+ * dram devices with MRS commands, and kicks off any
+ * HW memory clear process that the chip is capable of. The sooner
+ * that dram init is set for all nodes, the faster the memory system
+ * initialization can complete. Thus, the init loop is unrolled into
+ * two loops so as to start the processeses for non BSP nodes sooner.
+ * This procedure will not wait for the process to finish.
+ * Synchronization is handled elsewhere.
+ */
+
+ u32 val;
+ u32 dev;
+ u8 byte;
+ u32 reg;
+ u32 reg_off = dct * 0x100;
+
+ dev = pDCTstat->dev_dct;
+ val = Get_NB32(dev, 0x94 + reg_off);
+ if (val & (1<<MemClkFreqVal)) {
+ print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
+ byte = mctGet_NVbits(NV_DQSTrainCTL);
+ if (byte == 1) {
+ /* Enable DQSRcvEn training mode */
+ print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
+ reg = 0x78 + reg_off;
+ val = Get_NB32(dev, reg);
+ /* Setting this bit forces a 1T window with hard left
+			 * pass/fail edge and a probabilistic right pass/fail
+ * edge. LEFT edge is referenced for final
+ * receiver enable position.*/
+ val |= 1 << DqsRcvEnTrain;
+ Set_NB32(dev, reg, val);
+ }
+ mctHookBeforeDramInit(); /* generalized Hook */
+ print_t("\t\t\tStartupDCT_D: DramInit \n");
+ mct_DramInit(pMCTstat, pDCTstat, dct);
+ AfterDramInit_D(pDCTstat, dct);
+ mctHookAfterDramInit(); /* generalized Hook*/
+ }
+}
+
+
+static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 reg_end;
+ u32 dev = pDCTstat->dev_dct;
+ u32 reg = 0x40 + 0x100 * dct;
+ u32 val = 0;
+
+ if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
+ reg_end = 0x78 + 0x100 * dct;
+ } else {
+ reg_end = 0xA4 + 0x100 * dct;
+ }
+
+ while(reg < reg_end) {
+ Set_NB32(dev, reg, val);
+ reg += 4;
+ }
+
+ val = 0;
+ dev = pDCTstat->dev_map;
+ reg = 0xF0;
+ Set_NB32(dev, reg, val);
+}
+
+
+static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Initialize DCT Timing registers as per DIMM SPD.
+ * For primary timing (T, CL) use best case T value.
+ * For secondary timing params., use most aggressive settings
+ * of slowest DIMM.
+ *
+ * There are three components to determining "maximum frequency":
+ * SPD component, Bus load component, and "Preset" max frequency
+ * component.
+ *
+ * The SPD component is a function of the min cycle time specified
+ * by each DIMM, and the interaction of cycle times from all DIMMs
+ * in conjunction with CAS latency. The SPD component only applies
+ * when user timing mode is 'Auto'.
+ *
+ * The Bus load component is a limiting factor determined by electrical
+ * characteristics on the bus as a result of varying number of device
+ * loads. The Bus load component is specific to each platform but may
+ * also be a function of other factors. The bus load component only
+ * applies when user timing mode is 'Auto'.
+ *
+	 * The Preset component is subdivided into three items and is the
+	 * minimum of the set: Silicon revision, user limit setting (when user
+	 * timing mode is 'Auto' and memclock mode is 'Limit'), and OEM build
+	 * specification of the maximum frequency. The Preset component only
+	 * applies when user timing mode is 'Auto'.
+ */
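+
+	/* Put differently (illustrative summary): the memclock chosen below is
+	 * effectively min(best T from SPDGetTCL_D, bus-load limit from
+	 * mctGet_MaxLoadFreq, PresetmaxFreq from GetPresetmaxF_D), where
+	 * PresetmaxFreq is itself the minimum of the silicon, user, and OEM
+	 * limits.
+	 */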
+
+ u8 i;
+ u8 Twr, Trtp;
+ u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
+ u32 DramTimingLo, DramTimingHi;
+ u16 Tk10, Tk40;
+ u8 Twtr;
+ u8 LDIMM;
+ u8 DDR2_1066;
+ u8 byte;
+ u32 dword;
+ u32 dev;
+ u32 reg;
+ u32 reg_off;
+ u32 val;
+ u16 smbaddr;
+
+ /* Get primary timing (CAS Latency and Cycle Time) */
+ if (pDCTstat->Speed == 0) {
+ mctGet_MaxLoadFreq(pDCTstat);
+
+ /* and Factor in presets (setup options, Si cap, etc.) */
+ GetPresetmaxF_D(pMCTstat, pDCTstat);
+
+ /* Go get best T and CL as specified by DIMM mfgs. and OEM */
+ SPDGetTCL_D(pMCTstat, pDCTstat, dct);
+ /* skip callback mctForce800to1067_D */
+ pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
+ pDCTstat->CASL = pDCTstat->DIMMCASL;
+
+ /* if "manual" memclock mode */
+ if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
+ pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
+
+ mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
+ }
+
+ /* Gather all DIMM mini-max values for cycle timing data */
+ Rows = 0;
+ Trp = 0;
+ Trrd = 0;
+ Trcd = 0;
+ Trtp = 0;
+ Tras = 0;
+ Trc = 0;
+ Twr = 0;
+ Twtr = 0;
+ for (i=0; i < 4; i++)
+ Trfc[i] = 0;
+
+ for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
+ LDIMM = i >> 1;
+ if (pDCTstat->DIMMValid & (1 << i)) {
+ smbaddr = Get_DIMMAddress_D(pDCTstat, i);
+ byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
+ if (Rows < byte)
+ Rows = byte; /* keep track of largest row sz */
+
+ byte = mctRead_SPD(smbaddr, SPD_TRP);
+ if (Trp < byte)
+ Trp = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TRRD);
+ if (Trrd < byte)
+ Trrd = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TRCD);
+ if (Trcd < byte)
+ Trcd = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TRTP);
+ if (Trtp < byte)
+ Trtp = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TWR);
+ if (Twr < byte)
+ Twr = byte;
+
+ byte = mctRead_SPD(smbaddr, SPD_TWTR);
+ if (Twtr < byte)
+ Twtr = byte;
+
+ val = mctRead_SPD(smbaddr, SPD_TRC);
+ if ((val == 0) || (val == 0xFF)) {
+ pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
+ pDCTstat->ErrCode = SC_VarianceErr;
+ val = Get_DefTrc_k_D(pDCTstat->DIMMAutoSpeed);
+ } else {
+ byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
+ if (byte & 0xF0) {
+					val++; /* round up in case fractional extension is non-zero.*/
+ }
+ }
+ if (Trc < val)
+ Trc = val;
+
+ /* dev density=rank size/#devs per rank */
+ byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
+
+ val = ((byte >> 5) | (byte << 3)) & 0xFF;
+ val <<= 2;
+
+ byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
+ if (byte == 4) {
+ val >>= 4;
+ } else if (byte == 8) {
+ val >>= 3;
+ } else if (byte == 16) {
+ val >>= 2;
+ }
+
+ byte = bsr(val);
+
+ if (Trfc[LDIMM] < byte)
+ Trfc[LDIMM] = byte;
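+			/* Illustrative trace of the density math above, assuming
+			 * a 1GB rank (SPD byte 31 = 0x01 per the JEDEC DDR2
+			 * encoding) of x8 devices: rotl(0x01,3) = 0x08, << 2 =
+			 * 0x20, >> 3 for x8 = 4, and bsr(4) = 2, i.e. the
+			 * 1Gb-device Trfc entry (127.5ns) in the table
+			 * documented at Trfc[] in the DCT structure.
+			 */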
+
+ byte = mctRead_SPD(smbaddr, SPD_TRAS);
+ if (Tras < byte)
+ Tras = byte;
+ } /* Dimm Present */
+ }
+
+ /* Convert DRAM CycleTiming values and store into DCT structure */
+ DDR2_1066 = 0;
+ byte = pDCTstat->DIMMAutoSpeed;
+ if (byte == 5)
+ DDR2_1066 = 1;
+ Tk40 = Get_40Tk_D(byte);
+ Tk10 = Tk40>>2;
+
+ /* Notes:
+ 1. All secondary time values given in SPDs are in binary with units of ns.
+ 2. Some time values are scaled by four, in order to have least count of 0.25 ns
+ (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
+	 3. Internally to this SW, cycle time, Tk, is scaled by 10 to effect a
+ least count of 0.1 ns (more accuracy).
+ 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
+ equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
+ 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
+ equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
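+
+	/* Worked example of note 5 (assumed SPD values): tRP is one of the x4
+	 * parameters (units of 0.25ns), so a 15ns part reports SPD_TRP = 60.
+	 * At DIMMAutoSpeed = 4 (400Mhz, T = 2.5ns) Get_40Tk_D returns
+	 * Tk40 = 100 (40 * 2.5), and the Trp conversion below computes
+	 *	dword = 60 * 10 = 600;	val = 600 / 100 = 6 busclocks
+	 * i.e. exactly 15ns, so no round-up is needed.
+	 */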
+
+ /* Tras */
+ dword = Tras * 40;
+ pDCTstat->DIMMTras = (u16)dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+ if (val < Min_TrasT_1066)
+ val = Min_TrasT_1066;
+ else if (val > Max_TrasT_1066)
+ val = Max_TrasT_1066;
+ } else {
+ if (val < Min_TrasT)
+ val = Min_TrasT;
+ else if (val > Max_TrasT)
+ val = Max_TrasT;
+ }
+ pDCTstat->Tras = val;
+
+ /* Trp */
+ dword = Trp * 10;
+ pDCTstat->DIMMTrp = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+		if (val < Min_TrpT_1066)
+ val = Min_TrpT_1066;
+ else if (val > Max_TrpT_1066)
+ val = Max_TrpT_1066;
+ } else {
+ if (val < Min_TrpT)
+ val = Min_TrpT;
+ else if (val > Max_TrpT)
+ val = Max_TrpT;
+ }
+ pDCTstat->Trp = val;
+
+ /*Trrd*/
+ dword = Trrd * 10;
+ pDCTstat->DIMMTrrd = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+ if (val < Min_TrrdT_1066)
+ val = Min_TrrdT_1066;
+ else if (val > Max_TrrdT_1066)
+ val = Max_TrrdT_1066;
+ } else {
+ if (val < Min_TrrdT)
+ val = Min_TrrdT;
+ else if (val > Max_TrrdT)
+ val = Max_TrrdT;
+ }
+ pDCTstat->Trrd = val;
+
+ /* Trcd */
+ dword = Trcd * 10;
+ pDCTstat->DIMMTrcd = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+ if (val < Min_TrcdT_1066)
+ val = Min_TrcdT_1066;
+ else if (val > Max_TrcdT_1066)
+ val = Max_TrcdT_1066;
+ } else {
+ if (val < Min_TrcdT)
+ val = Min_TrcdT;
+ else if (val > Max_TrcdT)
+ val = Max_TrcdT;
+ }
+ pDCTstat->Trcd = val;
+
+ /* Trc */
+ dword = Trc * 40;
+ pDCTstat->DIMMTrc = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+ if (val < Min_TrcT_1066)
+ val = Min_TrcT_1066;
+ else if (val > Max_TrcT_1066)
+ val = Max_TrcT_1066;
+ } else {
+ if (val < Min_TrcT)
+ val = Min_TrcT;
+ else if (val > Max_TrcT)
+ val = Max_TrcT;
+ }
+ pDCTstat->Trc = val;
+
+ /* Trtp */
+ dword = Trtp * 10;
+ pDCTstat->DIMMTrtp = dword;
+ val = pDCTstat->Speed;
+ if (val <= 2) {
+ val = 2; /* Calculate by 7.75ns / Speed in ns to get clock # */
+ } else if (val == 4) { /* Note a speed of 3 will be a Trtp of 3 */
+ val = 3;
+ } else if (val == 5){
+ val = 2;
+ }
+ pDCTstat->Trtp = val;
+
+ /* Twr */
+ dword = Twr * 10;
+ pDCTstat->DIMMTwr = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+ if (val < Min_TwrT_1066)
+ val = Min_TwrT_1066;
+ else if (val > Max_TwrT_1066)
+ val = Max_TwrT_1066;
+ } else {
+ if (val < Min_TwrT)
+ val = Min_TwrT;
+ else if (val > Max_TwrT)
+ val = Max_TwrT;
+ }
+ pDCTstat->Twr = val;
+
+ /* Twtr */
+ dword = Twtr * 10;
+ pDCTstat->DIMMTwtr = dword;
+ val = dword / Tk40;
+ if (dword % Tk40) { /* round up number of busclocks */
+ val++;
+ }
+ if (DDR2_1066) {
+		if (val < Min_TwtrT_1066)
+ val = Min_TwtrT_1066;
+ else if (val > Max_TwtrT_1066)
+ val = Max_TwtrT_1066;
+ } else {
+ if (val < Min_TwtrT)
+ val = Min_TwtrT;
+ else if (val > Max_TwtrT)
+ val = Max_TwtrT;
+ }
+ pDCTstat->Twtr = val;
+
+
+ /* Trfc0-Trfc3 */
+ for (i=0; i<4; i++)
+ pDCTstat->Trfc[i] = Trfc[i];
+
+ mctAdjustAutoCycTmg_D();
+
+ /* Program DRAM Timing values */
+ DramTimingLo = 0; /* Dram Timing Low init */
+ val = pDCTstat->CASL;
+ val = Tab_tCL_j[val];
+ DramTimingLo |= val;
+
+ val = pDCTstat->Trcd;
+ if (DDR2_1066)
+ val -= Bias_TrcdT_1066;
+ else
+ val -= Bias_TrcdT;
+
+ DramTimingLo |= val<<4;
+
+ val = pDCTstat->Trp;
+ if (DDR2_1066)
+ val -= Bias_TrpT_1066;
+ else {
+ val -= Bias_TrpT;
+ val <<= 1;
+ }
+ DramTimingLo |= val<<7;
+
+ val = pDCTstat->Trtp;
+ val -= Bias_TrtpT;
+ DramTimingLo |= val<<11;
+
+ val = pDCTstat->Tras;
+ if (DDR2_1066)
+ val -= Bias_TrasT_1066;
+ else
+ val -= Bias_TrasT;
+ DramTimingLo |= val<<12;
+
+ val = pDCTstat->Trc;
+ val -= Bias_TrcT;
+ DramTimingLo |= val<<16;
+
+ if (!DDR2_1066) {
+ val = pDCTstat->Twr;
+ val -= Bias_TwrT;
+ DramTimingLo |= val<<20;
+ }
+
+ val = pDCTstat->Trrd;
+ if (DDR2_1066)
+ val -= Bias_TrrdT_1066;
+ else
+ val -= Bias_TrrdT;
+ DramTimingLo |= val<<22;
+
+
+	DramTimingHi = 0;			/* Dram Timing High init */
+ val = pDCTstat->Twtr;
+ if (DDR2_1066)
+ val -= Bias_TwtrT_1066;
+ else
+ val -= Bias_TwtrT;
+ DramTimingHi |= val<<8;
+
+ val = 2;
+ DramTimingHi |= val<<16;
+
+ val = 0;
+ for (i=4;i>0;i--) {
+ val <<= 3;
+ val |= Trfc[i-1];
+ }
+ DramTimingHi |= val << 20;
+
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+ print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
+ print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
+
+ Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
+ DramTimingHi |=0x0000FC77;
+ Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
+
+ if (DDR2_1066) {
+ /* Twr */
+ dword = pDCTstat->Twr;
+ dword -= Bias_TwrT_1066;
+ dword <<= 4;
+ reg = 0x84 + reg_off;
+ val = Get_NB32(dev, reg);
+ val &= 0x8F;
+ val |= dword;
+ Set_NB32(dev, reg, val);
+ }
+// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
+
+ print_tx("AutoCycTiming: Status ", pDCTstat->Status);
+ print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
+ print_t("AutoCycTiming: Done\n");
+
+ mctHookAfterAutoCycTmg();
+
+ return pDCTstat->ErrCode;
+}
+
+
+static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* Get max frequency from OEM platform definition, from any user
+ * override (limiting) of max frequency, and from any Si Revision
+ * Specific information. Return the least of these three in
+ * DCTStatStruc.PresetmaxFreq.
+ */
+
+ u16 proposedFreq;
+ u16 word;
+
+ /* Get CPU Si Revision defined limit (NPT) */
+	proposedFreq = 533;	 /* Rev F0 programmable max memclock is 533MHz */
+
+ /*Get User defined limit if "limit" mode */
+ if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
+ word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
+ if (word < proposedFreq)
+ proposedFreq = word;
+
+ /* Get Platform defined limit */
+ word = mctGet_NVbits(NV_MAX_MEMCLK);
+ if (word < proposedFreq)
+ proposedFreq = word;
+
+ word = pDCTstat->PresetmaxFreq;
+ if (word > proposedFreq)
+ word = proposedFreq;
+
+ pDCTstat->PresetmaxFreq = word;
+ }
+}
+
+
+
+static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Find the best T and CL primary timing parameter pair, per Mfg.,
+ * for the given set of DIMMs, and store into DCTStatStruc
+ * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
+ * index values and item values" for definition of CAS latency
+ * index (j) and Frequency index (k).
+ */
+ int i, j, k;
+ u8 T1min, CL1min;
+
+ /* i={0..7} (std. physical DIMM number)
+ * j is an integer which enumerates increasing CAS latency.
+ * k is an integer which enumerates decreasing cycle time.
+ * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
+ * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
+ */
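+
+	/* Search order sketch (descriptive only): the loops below walk k
+	 * (frequency) downward from K_MAX and, for each k, walk j (CAS
+	 * latency) upward from J_MIN; the first (k, j) pair supported by every
+	 * populated DIMM is therefore the highest common frequency with the
+	 * lowest CAS latency at that frequency. If no pair is found, the
+	 * CL_DEF/T_DEF failsafe values are used and SB_MinimumMode is flagged.
+	 */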
+
+ CL1min = 0xFF;
+ T1min = 0xFF;
+ for (k=K_MAX; k >= K_MIN; k--) {
+ for (j = J_MIN; j <= J_MAX; j++) {
+ if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
+ /* 1. check to see if DIMMi is populated.
+ 2. check if DIMMi supports CLj and Tjk */
+ for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
+ if (pDCTstat->DIMMValid & (1 << i)) {
+ if (Dimm_Supports_D(pDCTstat, i, j, k))
+ break;
+ }
+ } /* while ++i */
+ if (i == MAX_DIMMS_SUPPORTED) {
+ T1min = k;
+ CL1min = j;
+ goto got_TCL;
+ }
+ }
+ } /* while ++j */
+ } /* while --k */
+
+got_TCL:
+ if (T1min != 0xFF) {
+ pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
+ pDCTstat->DIMMAutoSpeed = T1min;
+ print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
+ print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
+
+ } else {
+ pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
+ pDCTstat->DIMMAutoSpeed = T_DEF;
+ pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
+ pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
+ pDCTstat->ErrCode = SC_VarianceErr;
+ }
+ print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
+ print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
+ print_t("SPDGetTCL_D: Done\n");
+}
+
+
+static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 dev;
+ u32 reg;
+ u32 val;
+
+ mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
+
+ if (pDCTstat->GangedMode) {
+ mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
+ }
+
+ if ( pDCTstat->_2Tmode == 2) {
+ dev = pDCTstat->dev_dct;
+ reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
+ val = Get_NB32(dev, reg);
+ val |= 1 << 20; /* 2T CMD mode */
+ Set_NB32(dev, reg, val);
+ }
+
+ mct_PlatformSpec(pMCTstat, pDCTstat, dct);
+ InitPhyCompensation(pMCTstat, pDCTstat, dct);
+ mctHookAfterPSCfg();
+ return pDCTstat->ErrCode;
+}
+
+
+static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 DramControl, DramTimingLo, Status;
+ u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
+ u32 val;
+ u32 reg_off;
+ u32 dev;
+ u16 word;
+ u32 dword;
+ u8 byte;
+
+ print_tx("AutoConfig_D: DCT: ", dct);
+
+ DramConfigLo = 0;
+ DramConfigHi = 0;
+ DramConfigMisc = 0;
+ DramConfigMisc2 = 0;
+
+	/* set bank addressing and Masks, plus CS pops */
+ SPDSetBanks_D(pMCTstat, pDCTstat, dct);
+ if (pDCTstat->ErrCode == SC_StopError)
+ goto AutoConfig_exit;
+
+ /* map chip-selects into local address space */
+ StitchMemory_D(pMCTstat, pDCTstat, dct);
+ InterleaveBanks_D(pMCTstat, pDCTstat, dct);
+
+ /* temp image of status (for convenience). RO usage! */
+ Status = pDCTstat->Status;
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+
+
+ /* Build Dram Control Register Value */
+	DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off);	/* Dram Config Misc 2 */
+	DramControl = Get_NB32 (dev, 0x78 + reg_off);	/* Dram Control */
+
+ if (mctGet_NVbits(NV_CLKHZAltVidC3))
+ DramControl |= 1<<16;
+
+ // FIXME: Add support(skip) for Ax and Cx versions
+ DramControl |= 5; /* RdPtrInit */
+
+
+ /* Build Dram Config Lo Register Value */
+ DramConfigLo |= 1 << 4; /* 75 Ohms ODT */
+ if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
+ if (pDCTstat->Speed == 3) {
+ if ((pDCTstat->MAdimms[dct] == 4))
+ DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
+ } else if (pDCTstat->Speed == 4){
+ if ((pDCTstat->MAdimms[dct] != 1))
+ DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
+ }
+ } else {
+ // FIXME: Skip for Ax versions
+ if ((pDCTstat->MAdimms[dct] == 4)) {
+ if ( pDCTstat->DimmQRPresent != 0) {
+ if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
+ DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
+ }
+ } else if ((pDCTstat->MAdimms[dct] == 4)) {
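+				/* NOTE: DimmQRPresent == 0 on this path, so the
+				 * DimmQRPresent test below can never succeed. */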
+ if (pDCTstat->Speed == 4) {
+ if ( pDCTstat->DimmQRPresent != 0) {
+ DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
+ }
+ }
+ }
+ } else if ((pDCTstat->MAdimms[dct] == 2)) {
+ DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
+ }
+
+ }
+
+ // FIXME: Skip for Ax versions
+ /* callback not required - if (!mctParityControl_D()) */
+ if (Status & (1 << SB_PARDIMMs)) {
+ DramConfigLo |= 1 << ParEn;
+ DramConfigMisc2 |= 1 << ActiveCmdAtRst;
+ } else {
+ DramConfigLo &= ~(1 << ParEn);
+ DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
+ }
+
+ if (mctGet_NVbits(NV_BurstLen32)) {
+ if (!pDCTstat->GangedMode)
+ DramConfigLo |= 1 << BurstLength32;
+ }
+
+ if (Status & (1 << SB_128bitmode))
+ DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
+
+ word = dct;
+ dword = X4Dimm;
+ while (word < 8) {
+ if (pDCTstat->Dimmx4Present & (1 << word))
+ DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
+ word++;
+ word++;
+ dword++;
+ }
+
+ if (!(Status & (1 << SB_Registered)))
+		DramConfigLo |= 1 << UnBuffDimm; /* Unbuffered DIMMs */
+
+ if (mctGet_NVbits(NV_ECC_CAP))
+ if (Status & (1 << SB_ECCDIMMs))
+ if ( mctGet_NVbits(NV_ECC))
+ DramConfigLo |= 1 << DimmEcEn;
+
+
+
+ /* Build Dram Config Hi Register Value */
+ dword = pDCTstat->Speed;
+ DramConfigHi |= dword - 1; /* get MemClk encoding */
+ DramConfigHi |= 1 << MemClkFreqVal;
+
+ if (Status & (1 << SB_Registered))
+ if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
+ /* set only if x8 Registered DIMMs in System*/
+ DramConfigHi |= 1 << RDqsEn;
+
+ if (mctGet_NVbits(NV_CKE_PDEN)) {
+ DramConfigHi |= 1 << 15; /* PowerDownEn */
+ if (mctGet_NVbits(NV_CKE_CTL))
+ /*Chip Select control of CKE*/
+ DramConfigHi |= 1 << 16;
+ }
+
+ /* Control Bank Swizzle */
+	if (0) /* callback not needed: mctBankSwizzleControl_D() */
+ DramConfigHi &= ~(1 << BankSwizzleMode);
+ else
+ DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
+
+ /* Check for Quadrank DIMM presence */
+ if ( pDCTstat->DimmQRPresent != 0) {
+ byte = mctGet_NVbits(NV_4RANKType);
+ if (byte == 2)
+ DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
+ else if (byte == 1)
+ DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
+ }
+
+	if (0) /* callback not needed: mctOverrideDcqBypMax_D() */
+ val = mctGet_NVbits(NV_BYPMAX);
+ else
+ val = 0x0f; // recommended setting (default)
+ DramConfigHi |= val << 24;
+
+ val = pDCTstat->DIMM2Kpage;
+ if (pDCTstat->GangedMode != 0) {
+ if (dct != 0) {
+ val &= 0x55;
+ } else {
+ val &= 0xAA;
+ }
+ }
+ if (val)
+ val = Tab_2KTfawT_k[pDCTstat->Speed];
+ else
+ val = Tab_1KTfawT_k[pDCTstat->Speed];
+
+ if (pDCTstat->Speed == 5)
+ val >>= 1;
+
+ val -= Bias_TfawT;
+ val <<= 28;
+ DramConfigHi |= val; /* Tfaw for 1K or 2K paged drams */
+
+ // FIXME: Skip for Ax versions
+ DramConfigHi |= 1 << DcqArbBypassEn;
+
+
+	/* Build MemClkDis value from the Dram Timing Lo and
+	 * Dram Config Misc registers.
+	 * 1. Assume that the MemClkDis field has been preset prior to this
+	 *    point.
+	 * 2. Only set MemClkDis bits if a DIMM is NOT present AND if
+	 *    NV_AllMemClks == 0 AND SB_DiagClks == 0. */
+
+
+ /* Dram Timing Low (owns Clock Enable bits) */
+ DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
+ if (mctGet_NVbits(NV_AllMemClks) == 0) {
+ /* Special Jedec SPD diagnostic bit - "enable all clocks" */
+ if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
+ const u8 *p;
+ byte = mctGet_NVbits(NV_PACK_TYPE);
+ if (byte == PT_L1)
+ p = Tab_L1CLKDis;
+ else if (byte == PT_M2)
+ p = Tab_M2CLKDis;
+ else
+ p = Tab_S1CLKDis;
+
+ dword = 0;
+ while(dword < MAX_DIMMS_SUPPORTED) {
+ val = p[dword];
+ print_tx("DramTimingLo: val=", val);
+ if (!(pDCTstat->DIMMValid & (1<<val)))
+ /*disable memclk*/
+ DramTimingLo |= 1<<(dword+24);
+ dword++ ;
+ }
+ }
+ }
+
+ print_tx("AutoConfig_D: DramControl: ", DramControl);
+ print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
+ print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
+ print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
+ print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
+ print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);
+
+ /* Write Values to the registers */
+ Set_NB32(dev, 0x78 + reg_off, DramControl);
+ Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
+ Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
+ Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
+ Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
+ mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
+ mct_ForceAutoPrecharge_D(pDCTstat, dct);
+ mct_EarlyArbEn_D(pMCTstat, pDCTstat);
+ mctHookAfterAutoCfg();
+
+// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
+
+ print_tx("AutoConfig: Status ", pDCTstat->Status);
+ print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
+ print_t("AutoConfig: Done\n");
+AutoConfig_exit:
+ return pDCTstat->ErrCode;
+}
+
+
+static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Set bank addressing, program Mask values and build a chip-select
+ * population map. This routine programs PCI 0:24N:2x80 config register
+ * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
+ */
+
+ u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
+ u32 BankAddrReg, csMask;
+
+ u32 val;
+ u32 reg;
+ u32 dev;
+ u32 reg_off;
+ u8 byte;
+ u16 word;
+ u32 dword;
+ u16 smbaddr;
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+
+ BankAddrReg = 0;
+ for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
+ byte = ChipSel;
+ if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
+ byte -= 3;
+
+ if (pDCTstat->DIMMValid & (1<<byte)) {
+ smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
+
+ byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
+ Rows = byte & 0x1f;
+
+ byte = mctRead_SPD(smbaddr, SPD_COLSZ);
+ Cols = byte & 0x1f;
+
+ Banks = mctRead_SPD(smbaddr, SPD_LBANKS);
+
+ byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
+ DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */
+
+ byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
+ Ranks = (byte & 7) + 1;
+
+ /* Configure Bank encoding
+ * Use a 6-bit key into a lookup table.
+ * Key (index) = CCCBRR, where CCC is the number of
+ * Columns minus 9,RR is the number of Rows minus 13,
+ * and B is the number of banks minus 2.
+ * See "6-bit Bank Addressing Table" at the end of
+ * this file.*/
+ byte = Cols - 9; /* 9 Cols is smallest dev size */
+ byte <<= 3; /* make room for row and bank bits*/
+ if (Banks == 8)
+ byte |= 4;
+
+ /* 13 Rows is smallest dev size */
+ byte |= Rows - 13; /* CCCBRR internal encode */
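+			/* e.g. 10 cols, 8 banks, 14 rows:
+			 * key = (10-9)<<3 | 4 | (14-13) = 0x0D */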
+
+ for (dword=0; dword < 12; dword++) {
+ if (byte == Tab_BankAddr[dword])
+ break;
+ }
+
+ if (dword < 12) {
+
+ /* bit no. of CS field in address mapping reg.*/
+ dword <<= (ChipSel<<1);
+ BankAddrReg |= dword;
+
+ /* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
+ or 2pow(rows+cols+banks-5)-1*/
+ csMask = 0;
+
+ byte = Rows + Cols; /* cl=rows+cols*/
+ if (Banks == 8)
+ byte -= 2; /* 3 banks - 5 */
+ else
+ byte -= 3; /* 2 banks - 5 */
+ /* mask size (64-bit rank only) */
+
+ if (pDCTstat->Status & (1 << SB_128bitmode))
+ byte++; /* double mask size if in 128-bit mode*/
+
+ csMask |= 1 << byte;
+ csMask--;
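+				/* e.g. 14 rows + 10 cols, 8 internal banks, 64-bit rank:
+				 * csMask = (1 << (24 - 2)) - 1 = 0x3FFFFF */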
+
+ /*set ChipSelect population indicator even bits*/
+ pDCTstat->CSPresent |= (1<<ChipSel);
+ if (Ranks >= 2)
+ /*set ChipSelect population indicator odd bits*/
+ pDCTstat->CSPresent |= 1 << (ChipSel + 1);
+
+ reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
+ val = csMask;
+ val &= 0x1FF83FE0; /* Mask out reserved bits.*/
+ Set_NB32(dev, reg, val);
+ }
+ } else {
+ if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
+ pDCTstat->CSTestFail |= (1<<ChipSel);
+ } /* if DIMMValid*/
+ } /* while ChipSel*/
+
+ SetCSTriState(pMCTstat, pDCTstat, dct);
+ /* SetCKETriState */
+ SetODTTriState(pMCTstat, pDCTstat, dct);
+
+	if (pDCTstat->Status & (1 << SB_128bitmode)) {
+		SetCSTriState(pMCTstat, pDCTstat, 1); /* force DCT1 */
+		SetODTTriState(pMCTstat, pDCTstat, 1); /* force DCT1 */
+ }
+ word = pDCTstat->CSPresent;
+ mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
+ word ^= pDCTstat->CSPresent;
+ pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
+ if (!pDCTstat->CSPresent)
+ pDCTstat->ErrCode = SC_StopError;
+
+ reg = 0x80 + reg_off; /* Bank Addressing Register */
+ Set_NB32(dev, reg, BankAddrReg);
+
+// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
+
+ print_tx("SPDSetBanks: Status ", pDCTstat->Status);
+ print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
+ print_t("SPDSetBanks: Done\n");
+}
+
+
+static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
+ * matching with DIMM on Channel B), the overall DIMM population,
+ * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
+ */
+
+ u8 i;
+ u8 smbaddr, smbaddr1;
+ u8 byte, byte1;
+
+ /* Check Symmetry of Channel A and Channel B DIMMs
+ (must be matched for 128-bit mode).*/
+ for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
+ if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
+ smbaddr = Get_DIMMAddress_D(pDCTstat, i);
+ smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
+
+ byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
+ byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
+ if (byte != byte1) {
+ pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+ break;
+ }
+
+ byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
+ byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
+ if (byte != byte1) {
+ pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+ break;
+ }
+
+ byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
+ byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
+ if (byte != byte1) {
+ pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+ break;
+ }
+
+ byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
+ byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
+ if (byte != byte1) {
+ pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+ break;
+ }
+
+ byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
+ byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
+ if (byte != byte1) {
+ pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
+ break;
+ }
+
+ }
+ }
+
+}
+
+
+static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Requires that Mask values for each bank be programmed first and that
+ * the chip-select population indicator is correctly set.
+ */
+
+ u8 b = 0;
+ u32 nxtcsBase, curcsBase;
+ u8 p, q;
+ u32 Sizeq, BiggestBank;
+ u8 _DSpareEn;
+
+ u16 word;
+ u32 dev;
+ u32 reg;
+ u32 reg_off;
+ u32 val;
+
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+
+ _DSpareEn = 0;
+
+ /* CS Sparing 1=enabled, 0=disabled */
+ if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
+ if (MCT_DIMM_SPARE_NO_WARM) {
+			/* DIMM sparing without a warm reset */
+ if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
+ word = pDCTstat->CSPresent;
+ val = bsf(word);
+ word &= ~(1<<val);
+ if (word)
+ /* Make sure at least two chip-selects are available */
+ _DSpareEn = 1;
+ else
+ pDCTstat->ErrStatus |= 1 << SB_SpareDis;
+ }
+ } else {
+ if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
+ word = pDCTstat->CSPresent;
+ val = bsf(word);
+ word &= ~(1 << val);
+ if (word)
+ /* Make sure at least two chip-selects are available */
+ _DSpareEn = 1;
+ else
+ pDCTstat->ErrStatus |= 1 << SB_SpareDis;
+ }
+ }
+ }
+
+ nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
+ for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
+ BiggestBank = 0;
+ for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
+ if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
+ reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
+ val = Get_NB32(dev, reg);
+ if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
+ reg = 0x60 + (q << 1) + reg_off; /*Mask[q] reg.*/
+ val = Get_NB32(dev, reg);
+ val >>= 19;
+ val++;
+ val <<= 19;
+ Sizeq = val; //never used
+ if (val > BiggestBank) {
+ /*Bingo! possibly Map this chip-select next! */
+ BiggestBank = val;
+ b = q;
+ }
+ }
+ } /*if bank present */
+ } /* while q */
+ if (BiggestBank !=0) {
+ curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
+ /* DRAM CS Base b Address Register offset */
+ reg = 0x40 + (b << 2) + reg_off;
+ if (_DSpareEn) {
+ BiggestBank = 0;
+ val = 1 << Spare; /* Spare Enable*/
+ } else {
+ val = curcsBase;
+ val |= 1 << CSEnable; /* Bank Enable */
+ }
+ Set_NB32(dev, reg, val);
+ if (_DSpareEn)
+ _DSpareEn = 0;
+ else
+ /* let nxtcsBase+=Size[b] */
+ nxtcsBase += BiggestBank;
+ }
+
+ /* bank present but disabled?*/
+ if ( pDCTstat->CSTestFail & (1 << p)) {
+ /* DRAM CS Base b Address Register offset */
+ reg = (p << 2) + 0x40 + reg_off;
+ val = 1 << TestFail;
+ Set_NB32(dev, reg, val);
+ }
+ }
+
+ if (nxtcsBase) {
+ pDCTstat->DCTSysLimit = nxtcsBase - 1;
+ mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
+ }
+
+// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
+
+ print_tx("StitchMemory: Status ", pDCTstat->Status);
+ print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
+ print_t("StitchMemory: Done\n");
+}
+
+
+static u8 Get_Tk_D(u8 k)
+{
+ return Table_T_k[k];
+}
+
+
+static u8 Get_CLj_D(u8 j)
+{
+ return Table_CL2_j[j];
+}
+
+static u8 Get_DefTrc_k_D(u8 k)
+{
+ return Tab_defTrc_k[k];
+}
+
+
+static u16 Get_40Tk_D(u8 k)
+{
+ return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
+}
+
+
+static u16 Get_Fk_D(u8 k)
+{
+ return Table_F_k[k]; /* FIXME: k or k<<1 ? */
+}
+
+
+static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
+ u8 i, u8 j, u8 k)
+{
+ u8 Tk, CLj, CL_i;
+ u8 ret = 0;
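+	/* Returns 0 if DIMM i can run at (CLj, Tk), nonzero if it cannot
+	 * (inverted sense; see the break/completion test in SPDGetTCL_D). */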
+
+ u32 DIMMi;
+ u8 byte;
+ u16 word, wordx;
+
+ DIMMi = Get_DIMMAddress_D(pDCTstat, i);
+
+ CLj = Get_CLj_D(j);
+
+ /* check if DIMMi supports CLj */
+ CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
+ byte = CL_i & CLj;
+ if (byte) {
+		/* find out if it's CL X, CL X-1, or CL X-2 */
+ word = bsr(byte); /* bit position of CLj */
+ wordx = bsr(CL_i); /* bit position of CLX of CLi */
+ wordx -= word; /* CL number (CL no. = 0,1, 2, or 3) */
+ wordx <<= 3; /* 8 bits per SPD byte index */
+ /*get T from SPD byte 9, 23, 25*/
+ word = (EncodedTSPD >> wordx) & 0xFF;
+ Tk = Get_Tk_D(k);
+ byte = mctRead_SPD(DIMMi, word); /* DIMMi speed */
+ if (Tk < byte) {
+ ret = 1;
+ } else if (byte == 0){
+ pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
+ ret = 1;
+ } else {
+ ret = 0; /* DIMM is capable! */
+ }
+ } else {
+ ret = 1;
+ }
+ return ret;
+}
+
+
+static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* Check DIMMs present, verify checksum, flag SDRAM type,
+ * build population indicator bitmaps, and preload bus loading
+ * of DIMMs into DCTStatStruc.
+ * MAAload=number of devices on the "A" bus.
+ * MABload=number of devices on the "B" bus.
+ * MAAdimms=number of DIMMs on the "A" bus slots.
+ * MABdimms=number of DIMMs on the "B" bus slots.
+ * DATAAload=number of ranks on the "A" bus slots.
+ * DATABload=number of ranks on the "B" bus slots.
+ */
+
+ u16 i, j;
+ u8 smbaddr, Index;
+ u16 Checksum;
+ u8 SPDCtrl;
+ u16 RegDIMMPresent, MaxDimms;
+ u8 devwidth;
+ u16 DimmSlots;
+ u8 byte = 0, bytex;
+ u16 word;
+
+ /* preload data structure with addrs */
+ mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
+
+ DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
+
+ SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
+
+ RegDIMMPresent = 0;
+ pDCTstat->DimmQRPresent = 0;
+
+ for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
+ if (i >= MaxDimms)
+ break;
+
+ if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
+ print_tx("\t DIMMPresence: i=", i);
+ smbaddr = Get_DIMMAddress_D(pDCTstat, i);
+ print_tx("\t DIMMPresence: smbaddr=", smbaddr);
+ if (smbaddr) {
+ Checksum = 0;
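+				/* JEDEC SPD: sum bytes 0-62; byte 63 holds the
+				 * checksum (low 8 bits of the sum). */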
+ for (Index=0; Index < 64; Index++){
+ int status;
+ status = mctRead_SPD(smbaddr, Index);
+ if (status < 0)
+ break;
+ byte = status & 0xFF;
+ if (Index < 63)
+ Checksum += byte;
+ }
+
+ if (Index == 64) {
+ pDCTstat->DIMMPresent |= 1 << i;
+ if ((Checksum & 0xFF) == byte) {
+ byte = mctRead_SPD(smbaddr, SPD_TYPE);
+ if (byte == JED_DDR2SDRAM) {
+ /*Dimm is 'Present'*/
+ pDCTstat->DIMMValid |= 1 << i;
+ }
+ } else {
+ pDCTstat->DIMMSPDCSE = 1 << i;
+ if (SPDCtrl == 0) {
+ pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
+ pDCTstat->ErrCode = SC_StopError;
+ } else {
+ /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
+ pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
+ byte = mctRead_SPD(smbaddr, SPD_TYPE);
+ if (byte == JED_DDR2SDRAM)
+ pDCTstat->DIMMValid |= 1 << i;
+ }
+ }
+ /* Check module type */
+ byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
+ if (byte & JED_REGADCMSK)
+ RegDIMMPresent |= 1 << i;
+ /* Check ECC capable */
+ byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
+ if (byte & JED_ECC) {
+ /* DIMM is ECC capable */
+ pDCTstat->DimmECCPresent |= 1 << i;
+ }
+ if (byte & JED_ADRCPAR) {
+						/* DIMM supports address/command parity */
+ pDCTstat->DimmPARPresent |= 1 << i;
+ }
+ /* Check if x4 device */
+ devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
+ if (devwidth == 4) {
+						/* DIMM is made with x4 drams */
+ pDCTstat->Dimmx4Present |= 1 << i;
+ } else if (devwidth == 8) {
+ pDCTstat->Dimmx8Present |= 1 << i;
+ } else if (devwidth == 16) {
+ pDCTstat->Dimmx16Present |= 1 << i;
+ }
+ /* check page size */
+ byte = mctRead_SPD(smbaddr, SPD_COLSZ);
+ byte &= 0x0F;
+ word = 1 << byte;
+ word >>= 3;
+ word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
+ word >>= 11;
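+					/* e.g. 11 column bits on a x8 device:
+					 * ((2^11)/8)*8 = 2048 -> 2KB page */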
+ if (word)
+ pDCTstat->DIMM2Kpage |= 1 << i;
+
+ /*Check if SPD diag bit 'analysis probe installed' is set */
+ byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
+ if ( byte & JED_PROBEMSK )
+ pDCTstat->Status |= 1<<SB_DiagClks;
+
+ byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
+ if (!(byte & (1<< SPDPLBit)))
+ pDCTstat->DimmPlPresent |= 1 << i;
+ byte &= 7;
+ byte++; /* ranks */
+ if (byte > 2) {
+ /* if any DIMMs are QR, we have to make two passes through DIMMs*/
+ if ( pDCTstat->DimmQRPresent == 0) {
+ MaxDimms <<= 1;
+ }
+ if (i < DimmSlots) {
+ pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
+ }
+ byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
+ } else if (byte == 2) {
+ pDCTstat->DimmDRPresent |= 1 << i;
+ }
+ bytex = devwidth;
+ if (devwidth == 16)
+ bytex = 4;
+ else if (devwidth == 4)
+ bytex=16;
+
+ if (byte == 2)
+ bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
+
+					j = i & (1<<0);	/* 0 = channel A (even slots), 1 = channel B (odd slots) */
+					pDCTstat->DATAload[j] += byte;	/* number of ranks on this channel's DATA bus */
+					pDCTstat->MAload[j] += bytex;	/* number of devices on this channel's CMD/ADDR bus */
+					pDCTstat->MAdimms[j]++;		/* number of DIMMs on this channel */
+ /*check for DRAM package Year <= 06*/
+ byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
+ if (byte < MYEAR06) {
+ /*Year < 06 and hence Week < 24 of 06 */
+ pDCTstat->DimmYr06 |= 1 << i;
+ pDCTstat->DimmWk2406 |= 1 << i;
+ } else if (byte == MYEAR06) {
+ /*Year = 06, check if Week <= 24 */
+ pDCTstat->DimmYr06 |= 1 << i;
+ byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
+ if (byte <= MWEEK24)
+ pDCTstat->DimmWk2406 |= 1 << i;
+ }
+ }
+ }
+ }
+ }
+ print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
+ print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
+ print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
+ print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
+ print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
+ print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
+ print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
+ print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
+ print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
+ print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
+ print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
+ print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
+ print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
+ print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
+ print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
+ print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
+ print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
+
+ if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
+ if (RegDIMMPresent != 0) {
+ if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
+ /* module type DIMM mismatch (reg'ed, unbuffered) */
+ pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
+ pDCTstat->ErrCode = SC_StopError;
+ } else{
+ /* all DIMMs are registered */
+ pDCTstat->Status |= 1<<SB_Registered;
+ }
+ }
+ if (pDCTstat->DimmECCPresent != 0) {
+ if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
+ /* all DIMMs are ECC capable */
+ pDCTstat->Status |= 1<<SB_ECCDIMMs;
+ }
+ }
+ if (pDCTstat->DimmPARPresent != 0) {
+ if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
+ /*all DIMMs are Parity capable */
+ pDCTstat->Status |= 1<<SB_PARDIMMs;
+ }
+ }
+ } else {
+ /* no DIMMs present or no DIMMs that qualified. */
+ pDCTstat->ErrStatus |= 1<<SB_NoDimms;
+ pDCTstat->ErrCode = SC_StopError;
+ }
+
+ print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
+ print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
+ print_t("\t DIMMPresence: Done\n");
+
+ mctHookAfterDIMMpre();
+
+ return pDCTstat->ErrCode;
+}
+
+
+static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, int j, int k)
+{
+ /* Determine if system is capable of operating at given input
+ * parameters for CL, and T. There are three components to
+ * determining "maximum frequency" in AUTO mode: SPD component,
+ * Bus load component, and "Preset" max frequency component.
+ * This procedure is used to help find the SPD component and relies
+ * on pre-determination of the bus load component and the Preset
+ * components. The generalized algorithm for finding maximum
+ * frequency is structured this way so as to optimize for CAS
+ * latency (which might get better as a result of reduced frequency).
+ * See "Global relationship between index values and item values"
+ * for definition of CAS latency index (j) and Frequency index (k).
+ */
+ u8 freqOK, ClOK;
+ u8 ret = 0;
+
+ if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
+ freqOK = 0;
+ else
+ freqOK = 1;
+
+ /* compare proposed CAS latency with AMD Si capabilities */
+ if ((j < J_MIN) || (j > J_MAX))
+ ClOK = 0;
+ else
+ ClOK = 1;
+
+ if (freqOK && ClOK)
+ ret = 1;
+
+ return ret;
+}
+
+
+static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
+{
+ u8 *p;
+
+ p = pDCTstat->DIMMAddr;
+ //mct_BeforeGetDIMMAddress();
+ return p[i];
+}
+
+
+static void mct_initDCT(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 val;
+ u8 err_code;
+
+ /* Config. DCT0 for Ganged or unganged mode */
+ print_t("\tmct_initDCT: DCTInit_D 0\n");
+ DCTInit_D(pMCTstat, pDCTstat, 0);
+ if (pDCTstat->ErrCode == SC_FatalErr) {
+ // Do nothing goto exitDCTInit; /* any fatal errors? */
+ } else {
+ /* Configure DCT1 if unganged and enabled*/
+ if (!pDCTstat->GangedMode) {
+ if ( pDCTstat->DIMMValidDCT[1] > 0) {
+ print_t("\tmct_initDCT: DCTInit_D 1\n");
+ err_code = pDCTstat->ErrCode; /* save DCT0 errors */
+ pDCTstat->ErrCode = 0;
+ DCTInit_D(pMCTstat, pDCTstat, 1);
+ if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
+ pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
+ } else {
+ val = 1 << DisDramInterface;
+ Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
+ }
+ }
+ }
+// exitDCTInit:
+}
+
+
+static void mct_DramInit(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+
+ mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
+ // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
+
+ /* Disable auto refresh before Dram init when in ganged mode */
+ if (pDCTstat->GangedMode) {
+ val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
+ val |= 1 << DisAutoRefresh;
+ Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
+ }
+
+ mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
+
+ /* Re-enable auto refresh after Dram init when in ganged mode
+ * to ensure both DCTs are in sync
+ */
+
+ if (pDCTstat->GangedMode) {
+ do {
+ val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
+ } while (!(val & (1 << InitDram)));
+
+ WaitRoutine_D(50);
+
+		val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
+		val &= ~(1 << DisAutoRefresh);
+		Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
+ }
+}
+
+
+static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u8 byte;
+ u8 bytex;
+ u32 val;
+ u32 reg;
+
+ byte = bytex = pDCTstat->DIMMValid;
+ bytex &= 0x55; /* CHA DIMM pop */
+ pDCTstat->DIMMValidDCT[0] = bytex;
+
+	byte &= 0xAA;		/* CHB DIMM pop */
+ byte >>= 1;
+ pDCTstat->DIMMValidDCT[1] = byte;
+
+ if (byte != bytex) {
+ pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
+ } else {
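+		/* Channels match; NV_Unganged forces an artificial mismatch
+		 * so the DCTs stay unganged. */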
+ if ( mctGet_NVbits(NV_Unganged) )
+ pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
+
+ if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
+ pDCTstat->GangedMode = 1;
+ /* valid 128-bit mode population. */
+ pDCTstat->Status |= 1 << SB_128bitmode;
+ reg = 0x110;
+ val = Get_NB32(pDCTstat->dev_dct, reg);
+ val |= 1 << DctGangEn;
+ Set_NB32(pDCTstat->dev_dct, reg, val);
+ print_tx("setMode: DRAM Controller Select Low Register = ", val);
+ }
+ }
+ return pDCTstat->ErrCode;
+}
+
+
+u32 Get_NB32(u32 dev, u32 reg)
+{
+ u32 addr;
+
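+	/* CF8/CFC config access; register bits [11:8] go into CF8 bits
+	 * [27:24] (extended config space, enabled in mct_init() via NB_CFG). */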
+ addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
+ outl((1<<31) | (addr & ~3), 0xcf8);
+
+ return inl(0xcfc);
+}
+
+
+void Set_NB32(u32 dev, u32 reg, u32 val)
+{
+ u32 addr;
+
+ addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
+ outl((1<<31) | (addr & ~3), 0xcf8);
+ outl(val, 0xcfc);
+}
+
+
+u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
+{
+ u32 dword;
+
+ Set_NB32(dev, index_reg, index);
+ dword = Get_NB32(dev, index_reg+0x4);
+
+ return dword;
+}
+
+void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
+{
+ Set_NB32(dev, index_reg, index);
+ Set_NB32(dev, index_reg + 0x4, data);
+}
+
+
+u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
+{
+
+ u32 dword;
+
+
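+	/* Indirect DCT access: post the index with the write bit clear,
+	 * poll DctAccessDone, then read the data port at index_reg + 4. */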
+ index &= ~(1 << DctAccessWrite);
+ Set_NB32(dev, index_reg, index);
+ do {
+ dword = Get_NB32(dev, index_reg);
+ } while (!(dword & (1 << DctAccessDone)));
+ dword = Get_NB32(dev, index_reg + 0x4);
+
+ return dword;
+}
+
+
+void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
+{
+ u32 dword;
+
+
+ Set_NB32(dev, index_reg + 0x4, data);
+ index |= (1 << DctAccessWrite);
+ Set_NB32(dev, index_reg, index);
+ do {
+ dword = Get_NB32(dev, index_reg);
+ } while (!(dword & (1 << DctAccessDone)));
+
+}
+
+
+static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ /* Get platform specific config/timing values from the interface layer
+ * and program them into DCT.
+ */
+
+ u32 dev = pDCTstat->dev_dct;
+ u32 index_reg;
+ u8 i, i_start, i_end;
+
+ if (pDCTstat->GangedMode) {
+ SyncSetting(pDCTstat);
+ i_start = 0;
+ i_end = 2;
+ } else {
+ i_start = dct;
+ i_end = dct + 1;
+ }
+ for (i=i_start; i<i_end; i++) {
+ index_reg = 0x98 + (i * 0x100);
+		Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]);	/* Output Driver Compensation Control */
+		Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]);	/* Address Timing Control */
+ }
+
+ return pDCTstat->ErrCode;
+
+}
+
+
+static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
+{
+ u32 dev;
+ u32 val;
+
+ if (pDCTstat->NodePresent) {
+ print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
+ dev = pDCTstat->dev_dct;
+
+ if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
+ do {
+ val = Get_NB32(dev, 0x110);
+ } while (!(val & (1 << DramEnabled)));
+ print_t("mct_SyncDCTsReady: DramEnabled\n");
+ }
+ } /* Node is present */
+}
+
+
+static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ if (!pDCTstat->GangedMode) {
+ if (dct == 0 ) {
+ pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
+ if (pDCTstat->DIMMValidDCT[dct] == 0)
+ pDCTstat->ErrCode = SC_StopError;
+ } else {
+ pDCTstat->CSPresent = 0;
+ pDCTstat->CSTestFail = 0;
+ pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
+ if (pDCTstat->DIMMValidDCT[dct] == 0)
+ pDCTstat->ErrCode = SC_StopError;
+ }
+ }
+}
+
+static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 ret;
+
+ if ( dct == 0) {
+ SPDCalcWidth_D(pMCTstat, pDCTstat);
+ ret = mct_setMode(pMCTstat, pDCTstat);
+ } else {
+ ret = pDCTstat->ErrCode;
+ }
+
+ print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
+ print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
+ print_t("SPDCalcWidth: Done\n");
+
+ return ret;
+}
+
+
+static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+ u32 dword;
+ u32 dev;
+ u32 reg;
+ u8 _MemHoleRemap;
+ u32 DramHoleBase;
+
+ _MemHoleRemap = mctGet_NVbits(NV_MemHole);
+ DramHoleBase = mctGet_NVbits(NV_BottomIO);
+ DramHoleBase <<= 8;
+	/* Scale NV_BottomIO from addr[31:24] to addr[31:16] so the hole base
+	 * has 128MB granularity; this is the 'effective' bottom of IO space.
+	 */
+ pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
+
+ /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
+ if (!pDCTstat->GangedMode) {
+ dev = pDCTstat->dev_dct;
+ pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
+ /* if DCT0 and DCT1 exist both, set DctSelBaseAddr[47:27] */
+ if (dct == 0) {
+ if (pDCTstat->DIMMValidDCT[1] > 0) {
+ dword = pDCTstat->DCTSysLimit + 1;
+ dword += pDCTstat->NodeSysBase;
+ dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
+ if ((dword >= DramHoleBase) && _MemHoleRemap) {
+ pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
+ val = pMCTstat->HoleBase;
+ val >>= 16;
+ val &= ~(0xFF);
+ val |= (((~val) & 0xFF) + 1);
+ val <<= 8;
+ dword += val;
+ }
+ reg = 0x110;
+ val = Get_NB32(dev, reg);
+ val &= 0x7F;
+ val |= dword;
+ val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
+ Set_NB32(dev, reg, val);
+ print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
+
+ reg = 0x114;
+ val = dword;
+ Set_NB32(dev, reg, val);
+ }
+ } else {
+ /* Program the DctSelBaseAddr value to 0
+ if DCT 0 is disabled */
+ if (pDCTstat->DIMMValidDCT[0] == 0) {
+ dword = pDCTstat->NodeSysBase;
+ dword >>= 8;
+ if (dword >= DramHoleBase) {
+ pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
+ val = pMCTstat->HoleBase;
+ val >>= 8;
+ val &= ~(0xFFFF);
+ val |= (((~val) & 0xFFFF) + 1);
+ dword += val;
+ }
+ reg = 0x114;
+ val = dword;
+ Set_NB32(dev, reg, val);
+
+ reg = 0x110;
+ val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
+ Set_NB32(dev, reg, val);
+ print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
+ }
+ }
+ } else {
+ pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
+ }
+ print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
+ print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
+}
+
+
+static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 ret;
+
+ if ( dct == 0)
+ ret = DIMMPresence_D(pMCTstat, pDCTstat);
+ else
+ ret = pDCTstat->ErrCode;
+
+ return ret;
+}
+
+
+/* mct_BeforeGetDIMMAddress inline in C */
+
+
+static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ if (pDCTstat->NodePresent) {
+ if (pDCTstat->DIMMValidDCT[0]) {
+ pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
+ Set_OtherTiming(pMCTstat, pDCTstat, 0);
+ }
+ if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
+ pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
+ Set_OtherTiming(pMCTstat, pDCTstat, 1);
+ }
+ } /* Node is present*/
+ } /* while Node */
+}
+
+
+static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 reg;
+ u32 reg_off = 0x100 * dct;
+ u32 val;
+ u32 dword;
+ u32 dev = pDCTstat->dev_dct;
+
+ Get_Trdrd(pMCTstat, pDCTstat, dct);
+ Get_Twrwr(pMCTstat, pDCTstat, dct);
+ Get_Twrrd(pMCTstat, pDCTstat, dct);
+ Get_TrwtTO(pMCTstat, pDCTstat, dct);
+ Get_TrwtWB(pMCTstat, pDCTstat);
+
+ reg = 0x8C + reg_off; /* Dram Timing Hi */
+ val = Get_NB32(dev, reg);
+ val &= 0xffff0300;
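+	/* F2x[1,0]8C fields written below: TrwtWB[2:0], TrwtTO[6:4],
+	 * Twrrd[11:10], Twrwr[13:12], Trdrd[15:14] */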
+ dword = pDCTstat->TrwtTO; //0x07
+ val |= dword << 4;
+ dword = pDCTstat->Twrrd; //0x03
+ val |= dword << 10;
+ dword = pDCTstat->Twrwr; //0x03
+ val |= dword << 12;
+ dword = pDCTstat->Trdrd; //0x03
+ val |= dword << 14;
+ dword = pDCTstat->TrwtWB; //0x07
+ val |= dword;
+ val = OtherTiming_A_D(pDCTstat, val);
+ Set_NB32(dev, reg, val);
+
+}
+
+
+static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 Trdrd;
+ u8 byte;
+ u32 dword;
+ u32 val;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+
+ if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
+ /* mixed (x4 or x8) DIMM types
+ the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
+ of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
+ byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
+ if (byte == 0)
+ Trdrd = 1;
+ else
+ Trdrd = 2;
+
+ } else {
+ /*
+ Trdrd with non-mixed DIMM types
+ RdDqsTime are the same for all DIMMs and DqsRcvEn difference between
+ any two DIMMs is less than half of a MEMCLK, BIOS should program Trdrd to 0000b,
+ else BIOS should program Trdrd to 0001b.
+
+ RdDqsTime are the same for all DIMMs
+ DDR400~DDR667 only use one set register
+ DDR800 have two set register for DIMM0 and DIMM1 */
+ Trdrd = 1;
+ if (pDCTstat->Speed > 3) {
+ /* DIMM0+DIMM1 exist */ //NOTE it should be 5
+ val = bsf(pDCTstat->DIMMValid);
+ dword = bsr(pDCTstat->DIMMValid);
+ if (dword != val && dword != 0) {
+ /* DCT Read DQS Timing Control - DIMM0 - Low */
+ dword = Get_NB32_index_wait(dev, index_reg, 0x05);
+ /* DCT Read DQS Timing Control - DIMM1 - Low */
+ val = Get_NB32_index_wait(dev, index_reg, 0x105);
+ if (val != dword)
+ goto Trdrd_1;
+
+ /* DCT Read DQS Timing Control - DIMM0 - High */
+ dword = Get_NB32_index_wait(dev, index_reg, 0x06);
+ /* DCT Read DQS Timing Control - DIMM1 - High */
+ val = Get_NB32_index_wait(dev, index_reg, 0x106);
+ if (val != dword)
+ goto Trdrd_1;
+ }
+ }
+
+ /* DqsRcvEn difference between any two DIMMs is
+ less than half of a MEMCLK */
+ /* DqsRcvEn byte 1,0*/
+ if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
+ goto Trdrd_1;
+ /* DqsRcvEn byte 3,2*/
+ if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
+ goto Trdrd_1;
+ /* DqsRcvEn byte 5,4*/
+ if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
+ goto Trdrd_1;
+ /* DqsRcvEn byte 7,6*/
+ if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
+ goto Trdrd_1;
+ /* DqsRcvEn ECC*/
+ if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
+ goto Trdrd_1;
+ Trdrd = 0;
+ Trdrd_1:
+ ;
+ }
+ pDCTstat->Trdrd = Trdrd;
+
+}
+
+
+static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 Twrwr = 0;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+ u32 val;
+ u32 dword;
+
+ /* WrDatGrossDlyByte only use one set register when DDR400~DDR667
+ DDR800 have two set register for DIMM0 and DIMM1 */
+ if (pDCTstat->Speed > 3) {
+ val = bsf(pDCTstat->DIMMValid);
+ dword = bsr(pDCTstat->DIMMValid);
+ if (dword != val && dword != 0) {
+ /*the largest WrDatGrossDlyByte of any DIMM minus the
+ WrDatGrossDlyByte of any other DIMM is equal to CGDD */
+ val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
+ }
+ if (val == 0)
+ Twrwr = 2;
+ else
+ Twrwr = 3;
+ }
+ pDCTstat->Twrwr = Twrwr;
+}
+
+
+static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 byte, bytex;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+
+ /* On any given byte lane, the largest WrDatGrossDlyByte delay of
+ any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
+ equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/
+ pDCTstat->Twrrd = 0;
+ Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
+ Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
+ bytex = pDCTstat->DqsRcvEnGrossL;
+ byte = pDCTstat->WrDatGrossH;
+ if (byte > bytex) {
+ byte -= bytex;
+ if (byte == 1)
+ bytex = 1;
+ else
+ bytex = 2;
+ } else {
+ bytex = 0;
+ }
+ pDCTstat->Twrrd = bytex;
+}
+
+
+static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 byte, bytex;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+
+ /* On any given byte lane, the largest WrDatGrossDlyByte delay of
+ any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
+ equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
+ Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
+ Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
+ bytex = pDCTstat->DqsRcvEnGrossL;
+ byte = pDCTstat->WrDatGrossH;
+ if (bytex > byte) {
+ bytex -= byte;
+ if ((bytex == 1) || (bytex == 2))
+ bytex = 3;
+ else
+ bytex = 4;
+ } else {
+ byte -= bytex;
+ if ((byte == 0) || (byte == 1))
+ bytex = 2;
+ else
+ bytex = 1;
+ }
+
+ pDCTstat->TrwtTO = bytex;
+}
+
+
+static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* TrwtWB ensures read-to-write data-bus turnaround.
+ This value should be one more than the programmed TrwtTO.*/
+ pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
+}
+
+
+static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
+ u8 dct, u32 dev, u32 index_reg,
+ u32 index)
+{
+ u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
+ u8 i;
+ u32 val;
+ u8 byte;
+
+ Smallest_0 = 0xFF;
+ Smallest_1 = 0xFF;
+ Largest_0 = 0;
+ Largest_1 = 0;
+
+ for (i=0; i < 8; i+=2) {
+ if ( pDCTstat->DIMMValid & (1 << i)) {
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ byte = val & 0xFF;
+ if (byte < Smallest_0)
+ Smallest_0 = byte;
+ if (byte > Largest_0)
+ Largest_0 = byte;
+ byte = (val >> 16) & 0xFF;
+ if (byte < Smallest_1)
+ Smallest_1 = byte;
+ if (byte > Largest_1)
+ Largest_1 = byte;
+ }
+ index += 3;
+ } /* while ++i */
+
+ /* check if total DqsRcvEn delay difference between any
+ two DIMMs is less than half of a MEMCLK */
+ if ((Largest_0 - Smallest_0) > 31)
+ return 1;
+ if ((Largest_1 - Smallest_1) > 31)
+ return 1;
+ return 0;
+}
+
+
+static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
+ u32 dev, u32 index_reg)
+{
+ u8 Smallest, Largest;
+ u32 val;
+ u8 byte, bytex;
+
+ /* The largest DqsRcvEnGrossDelay of any DIMM minus the
+ DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
+ Gross Delay Difference (CGDD) */
+ /* DqsRcvEn byte 1,0 */
+ val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
+ Largest = val & 0xFF;
+ Smallest = (val >> 8) & 0xFF;
+
+ /* DqsRcvEn byte 3,2 */
+ val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
+ byte = val & 0xFF;
+ bytex = (val >> 8) & 0xFF;
+ if (bytex < Smallest)
+ Smallest = bytex;
+ if (byte > Largest)
+ Largest = byte;
+
+ /* DqsRcvEn byte 5,4 */
+ val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
+ byte = val & 0xFF;
+ bytex = (val >> 8) & 0xFF;
+ if (bytex < Smallest)
+ Smallest = bytex;
+ if (byte > Largest)
+ Largest = byte;
+
+ /* DqsRcvEn byte 7,6 */
+ val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
+ byte = val & 0xFF;
+ bytex = (val >> 8) & 0xFF;
+ if (bytex < Smallest)
+ Smallest = bytex;
+ if (byte > Largest)
+ Largest = byte;
+
+ if (pDCTstat->DimmECCPresent> 0) {
+ /*DqsRcvEn Ecc */
+ val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
+ byte = val & 0xFF;
+ bytex = (val >> 8) & 0xFF;
+ if (bytex < Smallest)
+ Smallest = bytex;
+ if (byte > Largest)
+ Largest = byte;
+ }
+
+ pDCTstat->DqsRcvEnGrossL = Largest;
+ return Largest - Smallest;
+}
+
+
+static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
+ u8 dct, u32 dev, u32 index_reg)
+{
+ u8 Smallest, Largest;
+ u32 val;
+ u8 byte, bytex;
+
+ /* The largest WrDatGrossDlyByte of any DIMM minus the
+ WrDatGrossDlyByte of any other DIMM is equal to CGDD */
+ val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
+ Largest = val & 0xFF;
+ Smallest = (val >> 8) & 0xFF;
+ val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
+ byte = val & 0xFF;
+ bytex = (val >> 8) & 0xFF;
+ if (bytex < Smallest)
+ Smallest = bytex;
+ if (byte > Largest)
+ Largest = byte;
+
+ // FIXME: Add Cx support.
+
+ pDCTstat->WrDatGrossH = Largest;
+ return Largest - Smallest;
+}
+
+static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
+ u32 dev, u32 index_reg,
+ u32 index)
+{
+ u8 Smallest, Largest;
+ u8 i;
+ u8 byte;
+ u32 val;
+ u16 word;
+
+ Smallest = 7;
+ Largest = 0;
+
+ for (i=0; i < 8; i+=2) {
+ if ( pDCTstat->DIMMValid & (1 << i)) {
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val &= 0x00E000E0;
+ byte = (val >> 5) & 0xFF;
+ if (byte < Smallest)
+ Smallest = byte;
+ if (byte > Largest)
+ Largest = byte;
+ byte = (val >> (16 + 5)) & 0xFF;
+ if (byte < Smallest)
+ Smallest = byte;
+ if (byte > Largest)
+ Largest = byte;
+ }
+ index += 3;
+ } /* while ++i */
+
+ word = Smallest;
+ word <<= 8;
+ word |= Largest;
+
+ return word;
+}
+
+static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
+ u8 dct, u32 dev, u32 index_reg,
+ u32 index)
+{
+ u8 Smallest, Largest;
+ u8 i, j;
+ u32 val;
+ u8 byte;
+ u16 word;
+
+ Smallest = 3;
+ Largest = 0;
+ for (i=0; i < 2; i++) {
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val &= 0x60606060;
+ val >>= 5;
+ for (j=0; j < 4; j++) {
+ byte = val & 0xFF;
+ if (byte < Smallest)
+ Smallest = byte;
+ if (byte > Largest)
+ Largest = byte;
+ val >>= 8;
+ } /* while ++j */
+ index++;
+ } /*while ++i*/
+
+ if (pDCTstat->DimmECCPresent > 0) {
+ index++;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val &= 0x00000060;
+ val >>= 5;
+ byte = val & 0xFF;
+ if (byte < Smallest)
+ Smallest = byte;
+ if (byte > Largest)
+ Largest = byte;
+ }
+
+ word = Smallest;
+ word <<= 8;
+ word |= Largest;
+
+ return word;
+}
+
+
+
+static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
+
+
+ mct_ClrClToNB_D(pMCTstat, pDCTstat);
+ mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
+}
+
+
+static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
+{
+ print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");
+ mct_SetClToNB_D(pMCTstat, pDCTstat);
+ mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
+}
+
+
+static u32 mct_NodePresent_D(void)
+{
+ u32 val;
+ val = 0x12001022;
+ return val;
+}
+
+
+static void mct_init(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 lo, hi;
+ u32 addr;
+
+ pDCTstat->GangedMode = 0;
+ pDCTstat->DRPresent = 1;
+
+	/* enable extended PCI configuration access */
+ addr = 0xC001001F;
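+	/* NB_CFG MSR (C001_001F): bit 46 enables CF8 extended config access */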
+ _RDMSR(addr, &lo, &hi);
+ if (hi & (1 << (46-32))) {
+ pDCTstat->Status |= 1 << SB_ExtConfig;
+ } else {
+ hi |= 1 << (46-32);
+ _WRMSR(addr, lo, hi);
+ }
+}
+
+
+static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 reg;
+ u32 val;
+ u32 dev = pDCTstat->dev_dct;
+
+ /* Clear Legacy BIOS Mode bit */
+ reg = 0x94;
+ val = Get_NB32(dev, reg);
+ val &= ~(1<<LegacyBiosMode);
+ Set_NB32(dev, reg, val);
+}
+
+
+static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u32 Drambase, Dramlimit;
+ u32 val;
+ u32 reg;
+ u32 dev;
+ u32 devx;
+ u32 dword;
+ struct DCTStatStruc *pDCTstat;
+
+ pDCTstat = pDCTstatA + 0;
+ dev = pDCTstat->dev_map;
+
+ /* Copy dram map from F1x40/44,F1x48/4c,
+ to F1x120/124(Node0),F1x120/124(Node1),...*/
+ for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+ devx = pDCTstat->dev_map;
+
+ /* get base/limit from Node0 */
+ reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
+ val = Get_NB32(dev, reg);
+ Drambase = val >> ( 16 + 3);
+
+ reg = 0x44 + (Node << 3); /* Node0/Dram Base 0 */
+ val = Get_NB32(dev, reg);
+ Dramlimit = val >> (16 + 3);
+
+ /* set base/limit to F1x120/124 per Node */
+ if (pDCTstat->NodePresent) {
+ reg = 0x120; /* F1x120,DramBase[47:27] */
+ val = Get_NB32(devx, reg);
+ val &= 0xFFE00000;
+ val |= Drambase;
+ Set_NB32(devx, reg, val);
+
+ reg = 0x124;
+ val = Get_NB32(devx, reg);
+ val &= 0xFFE00000;
+ val |= Dramlimit;
+ Set_NB32(devx, reg, val);
+
+ if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
+ reg = 0xF0;
+ val = Get_NB32(devx, reg);
+ val |= (1 << DramMemHoistValid);
+ val &= ~(0xFF << 24);
+ dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
+ dword <<= 24;
+ val |= dword;
+ Set_NB32(devx, reg, val);
+ }
+
+ }
+ }
+}
+
+static void SetCSTriState(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+ u32 dev = pDCTstat->dev_dct;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u8 cs;
+ u32 index;
+ u16 word;
+
+ /* Tri-state unused chipselects when motherboard
+ termination is available */
+
+ // FIXME: skip for Ax
+
+ word = pDCTstat->CSPresent;
+ if (pDCTstat->Status & (1 << SB_Registered)) {
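+		/* For registered DIMMs, also treat the odd CS of each populated
+		 * even CS as in use so it is not tri-stated below. */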
+ for (cs = 0; cs < 8; cs++) {
+ if (word & (1 << cs)) {
+ if (!(cs & 1))
+ word |= 1 << (cs + 1);
+ }
+ }
+ }
+ word = (~word) & 0xFF;
+ index = 0x0c;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val |= word;
+ Set_NB32_index_wait(dev, index_reg, index, val);
+}
+
+
+
+static void SetCKETriState(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+ u32 dev;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u8 cs;
+ u32 index;
+ u16 word;
+
+ /* Tri-state unused CKEs when motherboard termination is available */
+
+ // FIXME: skip for Ax
+
+ dev = pDCTstat->dev_dct;
+ word = 0x101;
+ for (cs = 0; cs < 8; cs++) {
+ if (pDCTstat->CSPresent & (1 << cs)) {
+ if (!(cs & 1))
+ word &= 0xFF00;
+ else
+ word &= 0x00FF;
+ }
+ }
+
+ index = 0x0c;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ if ((word & 0x00FF) == 1)
+ val |= 1 << 12;
+ else
+ val &= ~(1 << 12);
+
+ if ((word >> 8) == 1)
+ val |= 1 << 13;
+ else
+ val &= ~(1 << 13);
+
+ Set_NB32_index_wait(dev, index_reg, index, val);
+}
+
+
+static void SetODTTriState(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+ u32 dev;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u8 cs;
+ u32 index;
+ u16 word;
+
+ /* Tri-state unused ODTs when motherboard termination is available */
+
+ // FIXME: skip for Ax
+
+ dev = pDCTstat->dev_dct;
+ word = 0;
+ for (cs = 0; cs < 8; cs += 2) {
+ if (!(pDCTstat->CSPresent & (1 << cs))) {
+ if (!(pDCTstat->CSPresent & (1 << (cs + 1))))
+ word |= (1 << (cs >> 1));
+ }
+ }
+
+ index = 0x0C;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val |= (word << 8);
+ Set_NB32_index_wait(dev, index_reg, index, val);
+}
+
+
+static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 i;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+ u32 val;
+ u32 valx = 0;
+ u32 dword;
+ const u8 *p;
+
+ val = Get_NB32_index_wait(dev, index_reg, 0x00);
+ dword = 0;
+ for (i=0; i < 6; i++) {
+ switch (i) {
+ case 0:
+ case 4:
+ p = Table_Comp_Rise_Slew_15x;
+ valx = p[(val >> 16) & 3];
+ break;
+ case 1:
+ case 5:
+ p = Table_Comp_Fall_Slew_15x;
+ valx = p[(val >> 16) & 3];
+ break;
+ case 2:
+ p = Table_Comp_Rise_Slew_20x;
+ valx = p[(val >> 8) & 3];
+ break;
+ case 3:
+ p = Table_Comp_Fall_Slew_20x;
+ valx = p[(val >> 8) & 3];
+ break;
+
+ }
+ dword |= valx << (5 * i);
+ }
+
+ /* Override/Exception */
+ if ((pDCTstat->Speed == 2) && (pDCTstat->MAdimms[dct] == 4))
+ dword &= 0xF18FFF18;
+
+ Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
+}
+
+
+static void WaitRoutine_D(u32 time)
+{
+ while(time) {
+ _EXECFENCE;
+ time--;
+ }
+}
+
+
+static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 reg;
+ u32 val;
+ u32 dev = pDCTstat->dev_dct;
+
+	/* GhEnhancement #18429 modified by askar: for a low NB CLK :
+	 * MemClk ratio, the DCT may need to arbitrate early to avoid
+	 * unnecessary bubbles.
+	 * Bit 19 of F2x[1,0]78 Dram Control Register; set this bit only when
+	 * the NB CLK : MemClk ratio is between 3:1 (inclusive) and 4.5:1
+	 * (inclusive).
+	 */
+
+ reg = 0x78;
+ val = Get_NB32(dev, reg);
+
+ //FIXME: check for Cx
+ if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
+ val |= (1 << EarlyArbEn);
+
+ Set_NB32(dev, reg, val);
+
+}
+
+
+static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 reg;
+ u32 val;
+ u32 tmp;
+ u32 rem;
+ u32 dev = pDCTstat->dev_dct;
+ u32 hi, lo;
+ u8 NbDid = 0;
+
+ /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
+ */
+
+ /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
+ _RDMSR(0xC0010071, &lo, &hi);
+ if (lo & (1 << 22))
+ NbDid |= 1;
+
+
+ reg = 0x94;
+ val = Get_NB32(dev, reg);
+ if (!(val & (1 << MemClkFreqVal)))
+		val = Get_NB32(dev, reg + 0x100); /* get the DCT1 value */
+
+ val &= 0x07;
+ val += 3;
+ if (NbDid)
+ val <<= 1;
+ tmp = val;
+
+ dev = pDCTstat->dev_nbmisc;
+ reg = 0xD4;
+ val = Get_NB32(dev, reg);
+ val &= 0x1F;
+ val += 3;
+ val *= 3;
+ val = val / tmp;
+ rem = val % tmp;
+ tmp >>= 1;
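+	/* val = integer NClk:MemClk ratio, rem = remainder; tmp is now half
+	 * the divisor, used to detect the x.5 boundary below. */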
+
+ // Yes this could be nicer but this was how the asm was....
+ if (val < 3) { /* NClk:MemClk < 3:1 */
+ return 0;
+ } else if (val > 4) { /* NClk:MemClk >= 5:1 */
+ return 0;
+ } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
+ return 0;
+ } else {
+ return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
+ }
+}
+
+
+static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u32 i;
+ struct DCTStatStruc *pDCTstat;
+ u16 start, stop;
+ u8 *p;
+ u16 host_serv1, host_serv2;
+
+ /* Initialize Data structures by clearing all entries to 0 */
+ p = (u8 *) pMCTstat;
+ for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
+ p[i] = 0;
+ }
+
+ for (Node = 0; Node < 8; Node++) {
+ pDCTstat = pDCTstatA + Node;
+ host_serv1 = pDCTstat->HostBiosSrvc1;
+ host_serv2 = pDCTstat->HostBiosSrvc2;
+
+ p = (u8 *) pDCTstat;
+ start = 0;
+ stop = ((u16) &((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
+ for (i = start; i < stop ; i++) {
+ p[i] = 0;
+ }
+
+ start = ((u16) &((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
+ stop = sizeof(struct DCTStatStruc);
+ for (i = start; i < stop; i++) {
+ p[i] = 0;
+ }
+ pDCTstat->HostBiosSrvc1 = host_serv1;
+ pDCTstat->HostBiosSrvc2 = host_serv2;
+ }
+}
+
+
+static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u8 i;
+ u32 reg_off;
+ u32 dev = pDCTstat->dev_dct;
+
+ // FIXME: skip for Ax
+ if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
+ for (i=0; i < 2; i++) {
+ reg_off = 0x100 * i;
+ Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
+ Set_NB32(dev, 0x9C + reg_off, 0x00000806);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
+ }
+ }
+}
+
+
+void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
+{
+ // FIXME: Skip for Ax
+ if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
+ *dqs_pos = 32;
+ }
+}
+
+
+void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 lo, hi;
+ u32 msr;
+
+ // FIXME: Maybe check the CPUID? - not for now.
+ // pDCTstat->LogicalCPUID;
+
+ msr = BU_CFG2;
+ _RDMSR(msr, &lo, &hi);
+ lo |= 1 << ClLinesToNbDis;
+ _WRMSR(msr, lo, hi);
+}
+
+
+void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+
+ u32 lo, hi;
+ u32 msr;
+
+ // FIXME: Maybe check the CPUID? - not for now.
+ // pDCTstat->LogicalCPUID;
+
+ msr = BU_CFG2;
+ _RDMSR(msr, &lo, &hi);
+ if (!pDCTstat->ClToNB_flag)
+ lo &= ~(1<<ClLinesToNbDis);
+ _WRMSR(msr, lo, hi);
+
+}
+
+
+void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 lo, hi;
+ u32 msr;
+
+ // FIXME: Maybe check the CPUID? - not for now.
+ // pDCTstat->LogicalCPUID;
+
+ msr = BU_CFG;
+ _RDMSR(msr, &lo, &hi);
+ hi |= (1 << WbEnhWsbDis_D);
+ _WRMSR(msr, lo, hi);
+}
+
+
+void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 lo, hi;
+ u32 msr;
+
+ // FIXME: Maybe check the CPUID? - not for now.
+ // pDCTstat->LogicalCPUID;
+
+ msr = BU_CFG;
+ _RDMSR(msr, &lo, &hi);
+ hi &= ~(1 << WbEnhWsbDis_D);
+ _WRMSR(msr, lo, hi);
+}
+
+
+void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
+ u32 DramConfigHi)
+{
+ /* Bug#15114: Comp. update interrupted by Freq. change can cause
+ * subsequent update to be invalid during any MemClk frequency change:
+ * Solution: From the bug report:
+	 * 1. A software-initiated frequency change should be wrapped into the
+	 *    following sequence:
+	 *	a) Disable Compensation (F2x[1, 0]9C_x08[30])
+	 *	b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
+	 *	c) Do frequency change
+	 *	d) Enable Compensation (F2x[1, 0]9C_x08[30])
+ * 2. A software-initiated Disable Compensation should always be
+ * followed by step b) of the above steps.
+ * Silicon Status: Fixed In Rev B0
+ *
+ * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
+ * Solution: BIOS should disable the phy automatic compensation prior
+ * to initiating a memory clock frequency change as follows:
+ * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
+ * 2. Reset the Begin Compensation bits by writing 32'h0 to
+ * F2x[1, 0]9C_x4D004F00
+ * 3. Perform frequency change
+	 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_x08[30]
+ * In addition, any time software disables the automatic phy
+ * compensation it should reset the begin compensation bit per step 2.
+ * Silicon Status: Fixed in DR-B0
+ */
+
+ u32 dev = pDCTstat->dev_dct;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 index;
+
+ u32 val;
+
+ index = 0x08;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
+
+ //FIXME: check for Bx Cx CPU
+ // if Ax mct_SetDramConfigHi_Samp_D
+
+ /* errata#177 */
+ index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
+ index |= 1 << DctAccessWrite;
+ val = 0;
+ Set_NB32_index_wait(dev, index_reg, index, val);
+
+ Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
+
+ index = 0x08;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
+}
+
+static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ struct DCTStatStruc *pDCTstat;
+
+ /* Errata 178
+ *
+ * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
+ * In TX FIFO
+ * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
+ * 5h, (F2x[1, 0]78[3:0] = 5h).
+ * Silicon Status: Fixed In Rev B0
+ *
+ * Bug#15880: Determine validity of reset settings for DDR PHY timing.
+	 * Solution: At a minimum, set the WrDqs fine delay to 0 for DDR2 training.
+ */
+
+ for (Node = 0; Node < 8; Node++) {
+ pDCTstat = pDCTstatA + Node;
+
+		if (pDCTstat->NodePresent) {
+			mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
+			mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
+			mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
+		}
+	}
+}
+
+static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 Receiver;
+ u32 val;
+ u32 dev = pDCTstat->dev_dct;
+ u32 reg_off = 0x100 * dct;
+ u32 addr;
+ u8 valid = 0;
+
+ pDCTstat->Channel = dct;
+ Receiver = mct_InitReceiver_D(pDCTstat, dct);
+ /* there are four receiver pairs, loosely associated with chipselects.*/
+ for (; Receiver < 8; Receiver += 2) {
+ if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
+ addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
+ if (valid) {
+ mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
+ Set_NB32(dev, 0x98 + reg_off, 0x0D00000C);
+ val = Get_NB32(dev, 0x9C + reg_off);
+ val |= 1 << 15;
+ Set_NB32(dev, 0x9C + reg_off, val);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0F0F0C);
+ mct_Wait_10ns(60); /* wait >= 300ns */
+
+ Set_NB32(dev, 0x98 + reg_off, 0x0D00000C);
+ val = Get_NB32(dev, 0x9C + reg_off);
+ val &= ~(1 << 15);
+ Set_NB32(dev, 0x9C + reg_off, val);
+ Set_NB32(dev, 0x98 + reg_off, 0x4D0F0F0C);
+ mct_Wait_10ns(400); /* wait >= 2us */
+ break;
+ }
+ }
+ }
+}
+
+
+static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u32 dev = pDCTstat->dev_dct;
+ u32 val;
+
+ /* Enable F2x110[DctDatIntlv] */
+ // Call back not required mctHookBeforeDatIntlv_D()
+ // FIXME Skip for Ax
+ if (!pDCTstat->GangedMode) {
+ val = Get_NB32(dev, 0x110);
+ val |= 1 << 5; // DctDatIntlv
+ Set_NB32(dev, 0x110, val);
+
+ // FIXME Skip for Cx
+ dev = pDCTstat->dev_nbmisc;
+ val = Get_NB32(dev, 0x8C); // NB Configuration Hi
+		val |= 1 << (36-32);	// DisDatMask (bit 36 of the NB Configuration register)
+ Set_NB32(dev, 0x8C, val);
+ }
+}
+
+
+static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
+ * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
+ * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
+ * controller is 1
+ */
+ u32 cha, chb;
+ u32 dev = pDCTstat->dev_dct;
+ u32 val;
+
+ cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
+ chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
+
+ if ((cha != chb) && ((cha == 0) || (chb == 0))) {
+ val = Get_NB32(dev, 0x78);
+		val |= 1 << ChSetupSync;
+ Set_NB32(dev, 0x78, val);
+ }
+}
+
+static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
+
+ u32 val;
+ u32 reg_off = 0x100 * dct;
+ u32 dev = pDCTstat->dev_dct;
+
+ if (pDCTstat->LogicalCPUID & AMD_DR_B2) {
+ mct_Wait_10ns(5000); /* Wait 50 us*/
+ val = Get_NB32(dev, 0x110);
+		if (!(val & (1 << DramEnabled))) {
+ /* If 50 us expires while DramEnable =0 then do the following */
+ val = Get_NB32(dev, 0x90 + reg_off);
+ val &= ~(1 << Width128); /* Program Width128 = 0 */
+ Set_NB32(dev, 0x90 + reg_off, val);
+
+ val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
+
+ if (pDCTstat->GangedMode) {
+ val = Get_NB32(dev, 0x90 + reg_off);
+				val |= 1 << Width128;	/* Restore Width128 = 1 */
+ Set_NB32(dev, 0x90 + reg_off, val);
+ }
+ }
+ }
+}
+
+
+/* ==========================================================
+ * 6-bit Bank Addressing Table
+ * RR=rows-13 binary
+ * B=Banks-2 binary
+ * CCC=Columns-9 binary
+ * ==========================================================
+ * DCT CCCBRR Rows Banks Columns 64-bit CS Size
+ * Encoding
+ * 0000 000000 13 2 9 128MB
+ * 0001 001000 13 2 10 256MB
+ * 0010 001001 14 2 10 512MB
+ * 0011 010000 13 2 11 512MB
+ * 0100 001100 13 3 10 512MB
+ * 0101 001101 14 3 10 1GB
+ * 0110 010001 14 2 11 1GB
+ * 0111 001110 15 3 10 2GB
+ * 1000 010101 14 3 11 2GB
+ * 1001 010110 15 3 11 4GB
+ * 1010 001111 16 3 10 4GB
+ * 1011 010111 16 3 11 8GB
+ */
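A minimal decoding sketch for the table above, assuming the CCCBRR layout described in the comment and reading the Banks column as the number of bank-address bits; the helper name is illustrative only, not part of the patch:

#include <stdint.h>

/* Decode a 6-bit CCCBRR encoding into the 64-bit chip-select size in MB. */
static uint32_t cs_size_mb_from_cccbrr(uint8_t cccbrr)
{
	uint8_t rows      = 13 + (cccbrr & 0x3);        /* RR  = rows - 13 */
	uint8_t bank_bits =  2 + ((cccbrr >> 2) & 0x1); /* B   = bank address bits - 2 */
	uint8_t cols      =  9 + ((cccbrr >> 3) & 0x7); /* CCC = columns - 9 */

	/* rows + cols + bank bits of addressing, 8 bytes (64 bits) per location */
	return 1u << (rows + cols + bank_bits + 3 - 20);
}

/* e.g. cs_size_mb_from_cccbrr(0x00) == 128  (13 rows, 2 bank bits,  9 columns)
 *      cs_size_mb_from_cccbrr(0x17) == 8192 (16 rows, 3 bank bits, 11 columns) */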
diff --git a/src/northbridge/amd/amdmct/mct/mct_d.h b/src/northbridge/amd/amdmct/mct/mct_d.h
new file mode 100644
index 0000000000..07de9ac564
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mct_d.h
@@ -0,0 +1,737 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Description: Include file for all generic DDR 2 MCT files.
+ */
+#ifndef MCT_D_H
+#define MCT_D_H
+
+
+
+/*===========================================================================
+ CPU - K8/FAM10
+===========================================================================*/
+#define PT_L1 0 /* CPU Package Type */
+#define PT_M2 1
+#define PT_S1 2
+#define PT_GR 3
+
+#define J_MIN 0 /* j loop constraint. 1=CL 2.0 T*/
+#define J_MAX 5 /* j loop constraint. 5=CL 7.0 T*/
+#define K_MIN 1 /* k loop constraint. 1=200 Mhz*/
+#define K_MAX 5 /* k loop constraint. 5=533 Mhz*/
+#define CL_DEF 2 /* Default value for failsafe operation. 2=CL 4.0 T*/
+#define T_DEF 1 /* Default value for failsafe operation. 1=5ns (cycle time)*/
+
+#define BSCRate 1 /* reg bit field=rate of dram scrubber for ecc*/
+ /* memory initialization (ecc and check-bits).*/
+ /* 1=40 ns/64 bytes.*/
+#define FirstPass 1 /* First pass through RcvEn training*/
+#define SecondPass 2 /* Second pass through Rcven training*/
+
+#define RCVREN_MARGIN 6 /* number of DLL taps to delay beyond first passing position*/
+#define MAXASYNCLATCTL_2 2 /* Max Async Latency Control value*/
+#define MAXASYNCLATCTL_3 3 /* Max Async Latency Control value*/
+
+#define DQS_FAIL 1
+#define DQS_PASS 0
+#define DQS_WRITEDIR 1
+#define DQS_READDIR 0
+#define MIN_DQS_WNDW 3
+#define secPassOffset 6
+#define Pass1MemClkDly	0x20		/* Add 1/2 Memclock delay */
+#define MAX_RD_LAT 0x3FF
+#define MIN_FENCE 14
+#define MAX_FENCE 20
+#define MIN_DQS_WR_FENCE 14
+#define MAX_DQS_WR_FENCE 20
+#define FenceTrnFinDlySeed 19
+#define EarlyArbEn 19
+
+#define PA_HOST(Node) ((((0x18+Node) << 3)+0) << 12) /* Node 0 Host Bus function PCI Address bits [15:0]*/
+#define PA_MAP(Node) ((((0x18+Node) << 3)+1) << 12) /* Node 0 MAP function PCI Address bits [15:0]*/
+#define PA_DCT(Node) ((((0x18+Node) << 3)+2) << 12) /* Node 0 DCT function PCI Address bits [15:0]*/
+//#define PA_EXT_DCT (((00 << 3)+4) << 8) /*Node 0 DCT extended configuration registers*/
+//#define PA_DCTADDL (((00 << 3)+2) << 8) /*Node x DCT function, Additional Registers PCI Address bits [15:0]*/
+//#define PA_EXT_DCTADDL (((00 << 3)+5) << 8) /*Node x DCT function, Additional Registers PCI Address bits [15:0]*/
+
+#define PA_NBMISC(Node) ((((0x18+Node) << 3)+3) << 12) /*Node 0 Misc PCI Address bits [15:0]*/
+//#define PA_NBDEVOP (((00 << 3)+3) << 8) /*Node 0 Misc PCI Address bits [15:0]*/
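+/* Illustrative expansion of the macros above: PA_DCT(0) ==
+ * (((0x18 + 0) << 3) + 2) << 12 == 0xC2000, i.e. PCI device 0x18 + Node,
+ * function 2 (the DCT), in the same 'dev' form that Get_NB32()/Set_NB32()
+ * take. */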
+
+#define DCC_EN 1 /* X:2:0x94[19]*/
+#define ILD_Lmt 3 /* X:2:0x94[18:16]*/
+
+#define EncodedTSPD 0x00191709 /* encodes which SPD byte to get T from*/
+ /* versus CL X, CL X-.5, and CL X-1*/
+
+#define Bias_TrpT 3 /* bias to convert bus clocks to bit field value*/
+#define Bias_TrrdT 2
+#define Bias_TrcdT 3
+#define Bias_TrasT 3
+#define Bias_TrcT 11
+#define Bias_TrtpT 2
+#define Bias_TwrT 3
+#define Bias_TwtrT 0
+#define Bias_TfawT 7
+
+#define Min_TrpT 3 /* min programmable value in busclocks*/
+#define Max_TrpT 6 /* max programmable value in busclocks*/
+#define Min_TrrdT 2
+#define Max_TrrdT 5
+#define Min_TrcdT 3
+#define Max_TrcdT 6
+#define Min_TrasT 5
+#define Max_TrasT 18
+#define Min_TrcT 11
+#define Max_TrcT 26
+#define Min_TrtpT 2
+#define Max_TrtpT 3
+#define Min_TwrT 3
+#define Max_TwrT 6
+#define Min_TwtrT 1
+#define Max_TwtrT 3
+
+/*DDR2-1066 support*/
+#define Bias_TrcdT_1066 5
+#define Bias_TrasT_1066 15
+#define Bias_TrrdT_1066 4
+#define Bias_TwrT_1066 4
+#define Bias_TrpT_1066 5
+#define Bias_TwtrT_1066 4
+#define Bias_TfawT_1066 15
+
+#define Min_TrcdT_1066 5
+#define Max_TrcdT_1066 12
+#define Min_TrasT_1066 15
+#define Max_TrasT_1066 30
+#define Min_TrcT_1066 11
+#define Max_TrcT_1066 42
+#define Min_TrrdT_1066 4
+#define Max_TrrdT_1066 7
+#define Min_TwrT_1066 5
+#define Max_TwrT_1066 8
+#define Min_TrpT_1066 5
+#define Max_TrpT_1066 12
+#define Min_TwtrT_1066 4
+#define Max_TwtrT_1066 7
+
+/*common register bit names*/
+#define DramHoleValid 0 /* func 1, offset F0h, bit 0*/
+#define DramMemHoistValid 1 /* func 1, offset F0h, bit 1*/
+#define CSEnable 0 /* func 2, offset 40h-5C, bit 0*/
+#define Spare 1 /* func 2, offset 40h-5C, bit 1*/
+#define TestFail 2 /* func 2, offset 40h-5C, bit 2*/
+#define DqsRcvEnTrain 18 /* func 2, offset 78h, bit 18*/
+#define EnDramInit 31 /* func 2, offset 7Ch, bit 31*/
+#define DisAutoRefresh 18 /* func 2, offset 8Ch, bit 18*/
+#define InitDram 0 /* func 2, offset 90h, bit 0*/
+#define BurstLength32 10 /* func 2, offset 90h, bit 10*/
+#define Width128 11 /* func 2, offset 90h, bit 11*/
+#define X4Dimm 12 /* func 2, offset 90h, bit 12*/
+#define UnBuffDimm 16 /* func 2, offset 90h, bit 16*/
+#define DimmEcEn 19 /* func 2, offset 90h, bit 19*/
+#define MemClkFreqVal 3 /* func 2, offset 94h, bit 3*/
+#define RDqsEn 12 /* func 2, offset 94h, bit 12*/
+#define DisDramInterface 14 /* func 2, offset 94h, bit 14*/
+#define DctAccessWrite 30 /* func 2, offset 98h, bit 30*/
+#define DctAccessDone 31 /* func 2, offset 98h, bit 31*/
+#define MemClrStatus 0 /* func 2, offset A0h, bit 0*/
+#define PwrSavingsEn 10 /* func 2, offset A0h, bit 10*/
+#define Mod64BitMux 4 /* func 2, offset A0h, bit 4*/
+#define DisableJitter 1 /* func 2, offset A0h, bit 1*/
+#define MemClrDis 1 /* func 3, offset F8h, FNC 4, bit 1*/
+#define SyncOnUcEccEn 2 /* func 3, offset 44h, bit 2*/
+#define Dr_MemClrStatus 10 /* func 3, offset 110h, bit 10*/
+#define MemClrBusy 9 /* func 3, offset 110h, bit 9*/
+#define DctGangEn 4 /* func 3, offset 110h, bit 4*/
+#define MemClrInit 3 /* func 3, offset 110h, bit 3*/
+#define AssertCke 28 /* func 2, offset 7Ch, bit 28*/
+#define DeassertMemRstX 27 /* func 2, offset 7Ch, bit 27*/
+#define SendMrsCmd 26 /* func 2, offset 7Ch, bit 26*/
+#define SendAutoRefresh 25 /* func 2, offset 7Ch, bit 25*/
+#define SendPchgAll 24 /* func 2, offset 7Ch, bit 24*/
+#define DisDqsBar 6 /* func 2, offset 90h, bit 6*/
+#define DramEnabled 8 /* func 2, offset 110h, bit 8*/
+#define LegacyBiosMode 9 /* func 2, offset 94h, bit 9*/
+#define PrefDramTrainMode 28 /* func 2, offset 11Ch, bit 28*/
+#define FlushWr 30 /* func 2, offset 11Ch, bit 30*/
+#define DisAutoComp 30 /* func 2, offset 9Ch, Index 8, bit 30*/
+#define DqsRcvTrEn 13 /* func 2, offset 9Ch, Index 8, bit 13*/
+#define ForceAutoPchg 23 /* func 2, offset 90h, bit 23*/
+#define ClLinesToNbDis 15 /* Bu_CFG2, bit 15*/
+#define WbEnhWsbDis_D (48-32)
+#define PhyFenceTrEn 3 /* func 2, offset 9Ch, Index 8, bit 3 */
+#define ParEn 8 /* func 2, offset 90h, bit 8 */
+#define DcqArbBypassEn 19 /* func 2, offset 94h, bit 19 */
+#define ActiveCmdAtRst 1 /* func 2, offset A8H, bit 1 */
+#define FlushWrOnStpGnt 29 /* func 2, offset 11Ch, bit 29 */
+#define BankSwizzleMode 22 /* func 2, offset 94h, bit 22 */
+#define ChSetupSync 15 /* func 2, offset 78h, bit 15 */
+
+
+
+/*=============================================================================
+ SW Initialization
+============================================================================*/
+#define DLL_Enable 1
+#define OCD_Default 2
+#define OCD_Exit 3
+
+
+
+/*=============================================================================
+ Jedec DDR II
+=============================================================================*/
+#define SPD_TYPE 2 /*SPD byte read location*/
+ #define JED_DDRSDRAM 0x07 /*Jedec defined bit field*/
+ #define JED_DDR2SDRAM 0x08 /*Jedec defined bit field*/
+
+#define SPD_DIMMTYPE 20
+#define SPD_ATTRIB 21
+ #define JED_DIFCKMSK 0x20 /*Differential Clock Input*/
+ #define JED_REGADCMSK 0x11 /*Registered Address/Control*/
+ #define JED_PROBEMSK 0x40 /*Analysis Probe installed*/
+#define SPD_DEVATTRIB 22
+#define SPD_EDCTYPE 11
+ #define JED_ECC 0x02
+ #define JED_ADRCPAR 0x04
+#define SPD_ROWSZ 3
+#define SPD_COLSZ 4
+#define SPD_LBANKS 17 /*number of [logical] banks on each device*/
+#define SPD_DMBANKS 5 /*number of physical banks on dimm*/
+ #define SPDPLBit 4 /* Dram package bit*/
+#define SPD_BANKSZ 31 /*capacity of physical bank*/
+#define SPD_DEVWIDTH 13
+#define SPD_CASLAT 18
+#define SPD_TRP 27
+#define SPD_TRRD 28
+#define SPD_TRCD 29
+#define SPD_TRAS 30
+#define SPD_TWR 36
+#define SPD_TWTR 37
+#define SPD_TRTP 38
+#define SPD_TRCRFC 40
+#define SPD_TRC 41
+#define SPD_TRFC 42
+
+#define SPD_MANDATEYR 93 /*Module Manufacturing Year (BCD)*/
+
+#define SPD_MANDATEWK 94 /*Module Manufacturing Week (BCD)*/
+
+/*-----------------------------
+ Jdec DDR II related equates
+-----------------------------*/
+#define MYEAR06 6 /* Manufacturing Year BCD encoding of 2006 - 06d*/
+#define MWEEK24 0x24 /* Manufacturing Week BCD encoding of June - 24d*/
+
+/*=============================================================================
+ Macros
+=============================================================================*/
+
+#define _2GB_RJ8 (2<<(30-8))
+#define _4GB_RJ8 (4<<(30-8))
+#define _4GB_RJ4 (4<<(30-4))
+
+#define BigPagex8_RJ8 (1<<(17+3-8)) /*128KB * 8 >> 8 */
+
+/*=============================================================================
+ Global MCT Status Structure
+=============================================================================*/
+struct MCTStatStruc {
+ u32 GStatus; /* Global Status bitfield*/
+ u32 HoleBase; /* If not zero, BASE[39:8] (system address)
+ of sub 4GB dram hole for HW remapping.*/
+ u32 Sub4GCacheTop; /* If not zero, the 32-bit top of cacheable memory.*/
+ u32 SysLimit; /* LIMIT[39:8] (system address)*/
+};
+
+/*=============================================================================
+ Global MCT Configuration Status Word (GStatus)
+=============================================================================*/
+/*These should begin at bit 0 of GStatus[31:0]*/
+#define GSB_MTRRshort 0 /* Ran out of MTRRs while mapping memory*/
+#define GSB_ECCDIMMs 1 /* All banks of all Nodes are ECC capable*/
+#define GSB_DramECCDis 2 /* Dram ECC requested but not enabled.*/
+#define GSB_SoftHole 3 /* A Node Base gap was created*/
+#define GSB_HWHole 4 /* A HW dram remap was created*/
+#define GSB_NodeIntlv 5 /* Node Memory interleaving was enabled*/
+#define GSB_SpIntRemapHole 16 /* Special condition for Node Interleave and HW remapping*/
+#define GSB_EnDIMMSpareNW 17 /* Indicates that DIMM Spare can be used without a warm reset */
+ /* NOTE: This is a local bit used by memory code */
+
+
+/*===============================================================================
+ Local DCT Status structure (a structure for each DCT)
+===============================================================================*/
+
+struct DCTStatStruc { /* A per Node structure*/
+/* DCTStatStruct_F - start */
+ u8 Node_ID; /* Node ID of current controller*/
+ u8 ErrCode; /* Current error condition of Node
+ 0= no error
+ 1= Variance Error, DCT is running but not in an optimal configuration.
+ 2= Stop Error, DCT is NOT running
+ 3= Fatal Error, DCT/MCT initialization has been halted.*/
+ u32 ErrStatus; /* Error Status bit Field */
+ u32 Status; /* Status bit Field*/
+ u8 DIMMAddr[8]; /* SPD address of DIMM controlled by MA0_CS_L[0,1]*/
+ /* SPD address of..MB0_CS_L[0,1]*/
+ /* SPD address of..MA1_CS_L[0,1]*/
+ /* SPD address of..MB1_CS_L[0,1]*/
+ /* SPD address of..MA2_CS_L[0,1]*/
+ /* SPD address of..MB2_CS_L[0,1]*/
+ /* SPD address of..MA3_CS_L[0,1]*/
+ /* SPD address of..MB3_CS_L[0,1]*/
+ u16 DIMMPresent; /*For each bit n 0..7, 1=DIMM n is present.
+ DIMM# Select Signal
+ 0 MA0_CS_L[0,1]
+ 1 MB0_CS_L[0,1]
+ 2 MA1_CS_L[0,1]
+ 3 MB1_CS_L[0,1]
+ 4 MA2_CS_L[0,1]
+ 5 MB2_CS_L[0,1]
+ 6 MA3_CS_L[0,1]
+ 7 MB3_CS_L[0,1]*/
+ u16 DIMMValid; /* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
+ u16 DIMMMismatch; /* For each bit n 0..7, 1=DIMM n is mismatched, channel B is always considered the mismatch */
+ u16 DIMMSPDCSE; /* For each bit n 0..7, 1=DIMM n SPD checksum error*/
+ u16 DimmECCPresent; /* For each bit n 0..7, 1=DIMM n is ECC capable.*/
+ u16 DimmPARPresent; /* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
+ u16 Dimmx4Present; /* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
+ u16 Dimmx8Present; /* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
+ u16 Dimmx16Present; /* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
+ u16 DIMM2Kpage; /* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+ u8 MAload[2]; /* Number of devices loading MAA bus*/
+ /* Number of devices loading MAB bus*/
+ u8 MAdimms[2]; /*Number of DIMMs loading CH A*/
+ /* Number of DIMMs loading CH B*/
+ u8 DATAload[2]; /*Number of ranks loading CH A DATA*/
+ /* Number of ranks loading CH B DATA*/
+ u8 DIMMAutoSpeed; /*Max valid Mfg. Speed of DIMMs
+ 1=200Mhz
+ 2=266Mhz
+ 3=333Mhz
+ 4=400Mhz
+ 5=533Mhz*/
+ u8 DIMMCASL; /* Min valid Mfg. CL bitfield
+ 0=2.0
+ 1=3.0
+ 2=4.0
+ 3=5.0
+ 4=6.0 */
+ u16 DIMMTrcd; /* Minimax Trcd*40 (ns) of DIMMs*/
+ u16 DIMMTrp; /* Minimax Trp*40 (ns) of DIMMs*/
+ u16 DIMMTrtp; /* Minimax Trtp*40 (ns) of DIMMs*/
+ u16 DIMMTras; /* Minimax Tras*40 (ns) of DIMMs*/
+ u16 DIMMTrc; /* Minimax Trc*40 (ns) of DIMMs*/
+ u16 DIMMTwr; /* Minimax Twr*40 (ns) of DIMMs*/
+ u16 DIMMTrrd; /* Minimax Trrd*40 (ns) of DIMMs*/
+ u16 DIMMTwtr; /* Minimax Twtr*40 (ns) of DIMMs*/
+ u8 Speed; /* Bus Speed (to set Controller)
+ 1=200Mhz
+ 2=266Mhz
+ 3=333Mhz
+ 4=400Mhz */
+ u8 CASL; /* CAS latency DCT setting
+ 0=2.0
+ 1=3.0
+ 2=4.0
+ 3=5.0
+ 4=6.0 */
+ u8 Trcd; /* DCT Trcd (busclocks) */
+ u8 Trp; /* DCT Trp (busclocks) */
+ u8 Trtp; /* DCT Trtp (busclocks) */
+ u8 Tras; /* DCT Tras (busclocks) */
+ u8 Trc; /* DCT Trc (busclocks) */
+ u8 Twr; /* DCT Twr (busclocks) */
+ u8 Trrd; /* DCT Trrd (busclocks) */
+ u8 Twtr; /* DCT Twtr (busclocks) */
+ u8 Trfc[4]; /* DCT Logical DIMM0 Trfc
+ 0=75ns (for 256Mb devs)
+ 1=105ns (for 512Mb devs)
+ 2=127.5ns (for 1Gb devs)
+ 3=195ns (for 2Gb devs)
+ 4=327.5ns (for 4Gb devs) */
+ /* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
+ /* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
+ /* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
+ u16 CSPresent; /* For each bit n 0..7, 1=Chip-select n is present */
+ u16 CSTestFail; /* For each bit n 0..7, 1=Chip-select n is present but disabled */
+ u32 DCTSysBase; /* BASE[39:8] (system address) of this Node's DCTs. */
+ u32 DCTHoleBase; /* If not zero, BASE[39:8] (system address) of dram hole for HW remapping. Dram hole exists on this Node's DCTs. */
+ u32 DCTSysLimit; /* LIMIT[39:8] (system address) of this Node's DCTs */
+ u16 PresetmaxFreq; /* Maximum OEM defined DDR frequency
+ 200=200Mhz (DDR400)
+ 266=266Mhz (DDR533)
+ 333=333Mhz (DDR667)
+ 400=400Mhz (DDR800) */
+ u8 _2Tmode; /* 1T or 2T CMD mode (slow access mode)
+ 1=1T
+ 2=2T */
+ u8 TrwtTO; /* DCT TrwtTO (busclocks)*/
+ u8 Twrrd; /* DCT Twrrd (busclocks)*/
+ u8 Twrwr; /* DCT Twrwr (busclocks)*/
+ u8 Trdrd; /* DCT Trdrd (busclocks)*/
+ u32 CH_ODC_CTL[2]; /* Output Driver Strength (see BKDG FN2:Offset 9Ch, index 00h*/
+ u32 CH_ADDR_TMG[2]; /* Address Bus Timing (see BKDG FN2:Offset 9Ch, index 04h*/
+ /* Output Driver Strength (see BKDG FN2:Offset 9Ch, index 20h*/
+ /* Address Bus Timing (see BKDG FN2:Offset 9Ch, index 24h*/
+ u16 CH_EccDQSLike[2]; /* CHA DQS ECC byte like...*/
+ u8 CH_EccDQSScale[2]; /* CHA DQS ECC byte scale*/
+ /* CHA DQS ECC byte like...*/
+ /* CHA DQS ECC byte scale*/
+ u8 MaxAsyncLat; /* Max Asynchronous Latency (ns)*/
+ // NOTE: Not used in Barcelona - u8 CH_D_RCVRDLY[2][4];
+ /* CHA DIMM 0 - 4 Receiver Enable Delay*/
+ /* CHB DIMM 0 - 4 Receiver Enable Delay */
+ // NOTE: Not used in Barcelona - u8 CH_D_B_DQS[2][2][8];
+ /* CHA Byte 0-7 Write DQS Delay */
+ /* CHA Byte 0-7 Read DQS Delay */
+ /* CHB Byte 0-7 Write DQS Delay */
+ /* CHB Byte 0-7 Read DQS Delay */
+ u32 PtrPatternBufA; /* Ptr on stack to aligned DQS testing pattern*/
+ u32 PtrPatternBufB; /* Ptr on stack to aligned DQS testing pattern*/
+ u8 Channel; /* Current Channel (0= CH A, 1=CH B)*/
+ u8 ByteLane; /* Current Byte Lane (0..7)*/
+ u8 Direction; /* Current DQS-DQ training write direction (0=read, 1=write)*/
+ u8 Pattern; /* Current pattern*/
+ u8 DQSDelay; /* Current DQS delay value*/
+ u32 TrainErrors; /* Current Training Errors*/
+
+ u32 AMC_TSC_DeltaLo; /* Time Stamp Counter measurement of AMC, Low dword*/
+ u32 AMC_TSC_DeltaHi; /* Time Stamp Counter measurement of AMC, High dword*/
+ // NOTE: Not used in Barcelona - u8 CH_D_DIR_MaxMin_B_Dly[2][4][2][2][8];
+ /* CH A byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
+ /* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
+ u32 LogicalCPUID; /* The logical CPUID of the node*/
+ u16 HostBiosSrvc1; /* Word sized general purpose field for use by host BIOS. Scratch space.*/
+ u32 HostBiosSrvc2; /* Dword sized general purpose field for use by host BIOS. Scratch space.*/
+ u16 DimmQRPresent; /* QuadRank DIMM present?*/
+ u16 DimmTrainFail; /* Bitmap showing which dimms failed training*/
+ u16 CSTrainFail; /* Bitmap showing which chipselects failed training*/
+	u16 DimmYr06;		/* Bitmap indicating which DIMMs have a manufacturer's year code <= 2006*/
+	u16 DimmWk2406;		/* Bitmap indicating which DIMMs have a manufacturer's week code <= 24 of 2006 (June)*/
+ u16 DimmDRPresent; /* Bitmap indicating that Dual Rank Dimms are present*/
+ u16 DimmPlPresent; /* Bitmap indicating that Planar (1) or Stacked (0) Dimms are present.*/
+	u16 ChannelTrainFai;	/* Bitmap showing the channel information about failed Chip Selects
+ 0 in any bit field indicates Channel 0
+ 1 in any bit field indicates Channel 1 */
+ u16 CSUsrTestFail; /* Chip selects excluded by user */
+/* DCTStatStruct_F - end */
+
+ u16 CH_MaxRdLat[2]; /* Max Read Latency (ns) for DCT 0*/
+ /* Max Read Latency (ns) for DCT 1*/
+ u8 CH_D_DIR_B_DQS[2][4][2][9]; /* [A/B] [DIMM1-4] [R/W] [DQS] */
+ /* CHA DIMM0 Byte 0 - 7 and Check Write DQS Delay*/
+ /* CHA DIMM0 Byte 0 - 7 and Check Read DQS Delay*/
+ /* CHA DIMM1 Byte 0 - 7 and Check Write DQS Delay*/
+ /* CHA DIMM1 Byte 0 - 7 and Check Read DQS Delay*/
+ /* CHB DIMM0 Byte 0 - 7 and Check Write DQS Delay*/
+ /* CHB DIMM0 Byte 0 - 7 and Check Read DQS Delay*/
+ /* CHB DIMM1 Byte 0 - 7 and Check Write DQS Delay*/
+ /* CHB DIMM1 Byte 0 - 7 and Check Read DQS Delay*/
+ u8 CH_D_B_RCVRDLY[2][4][8]; /* [A/B] [DIMM0-3] [DQS] */
+ /* CHA DIMM 0 Receiver Enable Delay*/
+ /* CHA DIMM 1 Receiver Enable Delay*/
+ /* CHA DIMM 2 Receiver Enable Delay*/
+ /* CHA DIMM 3 Receiver Enable Delay*/
+
+ /* CHB DIMM 0 Receiver Enable Delay*/
+ /* CHB DIMM 1 Receiver Enable Delay*/
+ /* CHB DIMM 2 Receiver Enable Delay*/
+ /* CHB DIMM 3 Receiver Enable Delay*/
+ u8 CH_D_BC_RCVRDLY[2][4];
+ /* CHA DIMM 0 - 4 Check Byte Receiver Enable Delay*/
+ /* CHB DIMM 0 - 4 Check Byte Receiver Enable Delay*/
+ u8 DIMMValidDCT[2]; /* DIMM# in DCT0*/
+ /* DIMM# in DCT1*/
+ u8 MaxDCTs; /* Max number of DCTs in system*/
+ // NOTE: removed u8 DCT. Use ->dev_ for pci R/W; /*DCT pointer*/
+ u8 GangedMode; /* Ganged mode enabled, 0 = disabled, 1 = enabled*/
+	u8 DRPresent;		/* Family 10 present flag, 0 = not Fam10, 1 = Fam10*/
+ u32 NodeSysLimit; /* BASE[39:8],for DCT0+DCT1 system address*/
+ u8 WrDatGrossH;
+ u8 DqsRcvEnGrossL;
+ // NOTE: Not used - u8 NodeSpeed /* Bus Speed (to set Controller)
+ /* 1=200Mhz */
+ /* 2=266Mhz */
+ /* 3=333Mhz */
+ // NOTE: Not used - u8 NodeCASL /* CAS latency DCT setting
+ /* 0=2.0 */
+ /* 1=3.0 */
+ /* 2=4.0 */
+ /* 3=5.0 */
+ /* 4=6.0 */
+ u8 TrwtWB;
+ u8 CurrRcvrCHADelay; /* for keep current RcvrEnDly of chA*/
+ u16 T1000; /* get the T1000 figure (cycle time (ns)*1K)*/
+ u8 DqsRcvEn_Pass; /* for TrainRcvrEn byte lane pass flag*/
+ u8 DqsRcvEn_Saved; /* for TrainRcvrEn byte lane saved flag*/
+ u8 SeedPass1Remainder; /* for Phy assisted DQS receiver enable training*/
+
+ /* for second pass - Second pass should never run for Fam10*/
+ // NOTE: Not used for Barcelona - u8 CH_D_B_RCVRDLY_1[2][4][8]; /* CHA DIMM 0 Receiver Enable Delay*/
+ /* CHA DIMM 1 Receiver Enable Delay*/
+ /* CHA DIMM 2 Receiver Enable Delay*/
+ /* CHA DIMM 3 Receiver Enable Delay*/
+
+ /* CHB DIMM 0 Receiver Enable Delay*/
+ /* CHB DIMM 1 Receiver Enable Delay*/
+ /* CHB DIMM 2 Receiver Enable Delay*/
+ /* CHB DIMM 3 Receiver Enable Delay*/
+
+ u8 ClToNB_flag; /* is used to restore ClLinesToNbDis bit after memory */
+ u32 NodeSysBase; /* for channel interleave usage */
+
+/* New for LB Support */
+ u8 NodePresent;
+ u32 dev_host;
+ u32 dev_map;
+ u32 dev_dct;
+ u32 dev_nbmisc;
+};
+
+/*===============================================================================
+ Local Error Status Codes (DCTStatStruc.ErrCode)
+===============================================================================*/
+#define SC_RunningOK 0
+#define SC_VarianceErr 1 /* Running non-optimally*/
+#define SC_StopError 2 /* Not Running*/
+#define SC_FatalErr 3 /* Fatal Error, MCTB has exited immediately*/
+
+/*===============================================================================
+ Local Error Status (DCTStatStruc.ErrStatus[31:0])
+===============================================================================*/
+#define SB_NoDimms 0
+#define SB_DIMMChkSum 1
+#define SB_DimmMismatchM 2 /* dimm module type(buffer) mismatch*/
+#define SB_DimmMismatchT 3 /* dimm CL/T mismatch*/
+#define SB_DimmMismatchO 4 /* dimm organization mismatch (128-bit)*/
+#define SB_NoTrcTrfc 5 /* SPD missing Trc or Trfc info*/
+#define SB_NoCycTime 6 /* SPD missing byte 23 or 25*/
+#define SB_BkIntDis 7 /* Bank interleave requested but not enabled*/
+#define SB_DramECCDis 8 /* Dram ECC requested but not enabled*/
+#define SB_SpareDis 9 /* Online spare requested but not enabled*/
+#define SB_MinimumMode 10 /* Running in Minimum Mode*/
+#define SB_NORCVREN 11 /* No DQS Receiver Enable pass window found*/
+#define SB_CHA2BRCVREN 12 /* DQS Rcvr En pass window CHA to CH B too large*/
+#define SB_SmallRCVR 13 /* DQS Rcvr En pass window too small (far right of dynamic range)*/
+#define SB_NODQSPOS 14 /* No DQS-DQ passing positions*/
+#define SB_SMALLDQS 15 /* DQS-DQ passing window too small*/
+#define SB_DCBKScrubDis 16 /* DCache scrub requested but not enabled */
+
+/*===============================================================================
+ Local Configuration Status (DCTStatStruc.Status[31:0])
+===============================================================================*/
+#define SB_Registered 0 /* All DIMMs are Registered*/
+#define SB_ECCDIMMs 1 /* All banks ECC capable*/
+#define SB_PARDIMMs 2 /* All banks Addr/CMD Parity capable*/
+#define SB_DiagClks 3 /* Jedec ALL slots clock enable diag mode*/
+#define SB_128bitmode 4 /* DCT in 128-bit mode operation*/
+#define SB_64MuxedMode 5 /* DCT in 64-bit mux'ed mode.*/
+#define SB_2TMode 6 /* 2T CMD timing mode is enabled.*/
+#define SB_SWNodeHole 7 /* Remapping of Node Base on this Node to create a gap.*/
+#define SB_HWHole 8 /* Memory Hole created on this Node using HW remapping.*/
+#define SB_Over400MHz 9 /* DCT freq >= 400MHz flag*/
+#define SB_DQSPos_Pass2 10 /* Using for TrainDQSPos DIMM0/1, when freq>=400MHz*/
+#define SB_DQSRcvLimit 11 /* Using for DQSRcvEnTrain to know we have reached to upper bound.*/
+#define SB_ExtConfig	12	/* Indicates the default setting for extended PCI configuration support*/
+
+
+
+
+/*===============================================================================
+ NVRAM/run-time-configurable Items
+===============================================================================*/
+/*Platform Configuration*/
+#define NV_PACK_TYPE 0 /* CPU Package Type (2-bits)
+ 0=NPT L1
+ 1=NPT M2
+ 2=NPT S1*/
+#define NV_MAX_NODES 1 /* Number of Nodes/Sockets (4-bits)*/
+#define NV_MAX_DIMMS 2 /* Number of DIMM slots for the specified Node ID (4-bits)*/
+#define NV_MAX_MEMCLK 3 /* Maximum platform demonstrated Memclock (10-bits)
+ 200=200Mhz (DDR400)
+ 266=266Mhz (DDR533)
+ 333=333Mhz (DDR667)
+ 400=400Mhz (DDR800)*/
+#define NV_ECC_CAP 4 /* Bus ECC capable (1-bits)
+ 0=Platform not capable
+ 1=Platform is capable*/
+#define NV_4RANKType 5 /* Quad Rank DIMM slot type (2-bits)
+ 0=Normal
+ 1=R4 (4-Rank Registered DIMMs in AMD server configuration)
+ 2=S4 (Unbuffered SO-DIMMs)*/
+#define NV_BYPMAX 6 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
+ 4=4 times bypass (normal for non-UMA systems)
+ 7=7 times bypass (normal for UMA systems)*/
+#define NV_RDWRQBYP 7 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
+ 2=8 times (normal for non-UMA systems)
+ 3=16 times (normal for UMA systems)*/
+
+
+/*Dram Timing*/
+#define NV_MCTUSRTMGMODE 10 /* User Memclock Mode (2-bits)
+ 0=Auto, no user limit
+ 1=Auto, user limit provided in NV_MemCkVal
+ 2=Manual, user value provided in NV_MemCkVal*/
+#define NV_MemCkVal 11 /* Memory Clock Value (2-bits)
+ 0=200Mhz
+ 1=266Mhz
+ 2=333Mhz
+ 3=400Mhz*/
+
+/*Dram Configuration*/
+#define NV_BankIntlv 20 /* Dram Bank (chip-select) Interleaving (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_AllMemClks 21 /* Turn on All DIMM clocks (1-bits)
+ 0=normal
+ 1=enable all memclocks*/
+#define NV_SPDCHK_RESTRT 22 /* SPD Check control bitmap (1-bits)
+ 0=Exit current node init if any DIMM has SPD checksum error
+ 1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+#define NV_DQSTrainCTL 23 /* DQS Signal Timing Training Control
+ 0=skip DQS training
+ 1=perform DQS training*/
+#define NV_NodeIntlv 24 /* Node Memory Interleaving (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_BurstLen32 25 /* BurstLength32 for 64-bit mode (1-bits)
+ 0=disable (normal)
+ 1=enable (4 beat burst when width is 64-bits)*/
+
+/*Dram Power*/
+#define NV_CKE_PDEN 30 /* CKE based power down mode (1-bits)
+ 0=disable
+ 1=enable*/
+#define NV_CKE_CTL 31 /* CKE based power down control (1-bits)
+ 0=per Channel control
+ 1=per Chip select control*/
+#define NV_CLKHZAltVidC3 32 /* Memclock tri-stating during C3 and Alt VID (1-bits)
+ 0=disable
+ 1=enable*/
+
+/*Memory Map/Mgt.*/
+#define NV_BottomIO 40 /* Bottom of 32-bit IO space (8-bits)
+ NV_BottomIO[7:0]=Addr[31:24]*/
+#define NV_BottomUMA 41 /* Bottom of shared graphics dram (8-bits)
+ NV_BottomUMA[7:0]=Addr[31:24]*/
+#define NV_MemHole 42 /* Memory Hole Remapping (1-bits)
+ 0=disable
+ 1=enable */
+
+/*ECC*/
+#define NV_ECC 50 /* Dram ECC enable*/
+#define NV_NBECC 52 /* ECC MCE enable*/
+#define NV_ChipKill 53 /* Chip-Kill ECC Mode enable*/
+#define NV_ECCRedir 54 /* Dram ECC Redirection enable*/
+#define NV_DramBKScrub 55 /* Dram ECC Background Scrubber CTL*/
+#define NV_L2BKScrub 56 /* L2 ECC Background Scrubber CTL*/
+#define NV_DCBKScrub 57 /* DCache ECC Background Scrubber CTL*/
+#define NV_CS_SpareCTL 58 /* Chip Select Spare Control bit 0:
+ 0=disable Spare
+ 1=enable Spare */
+ /* Chip Select Spare Control bit 1-4:
+ Reserved, must be zero*/
+#define NV_SyncOnUnEccEn 61 /* SyncOnUnEccEn control
+ 0=disable
+ 1=enable*/
+#define NV_Unganged 62
+
+#define NV_ChannelIntlv 63 /* Channel Interleaving (3-bits)
+ xx0b = disable
+ yy1b = enable with DctSelIntLvAddr set to yyb */
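+/* These indices are consumed by the mctGet_NVbits() platform helper used
+ * elsewhere in this patch; e.g. mctGet_NVbits(NV_MAX_DIMMS) in mctardk3.c
+ * selects between the 4-DIMM and 8-DIMM ATC/ODC tables. */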
+
+
+#ifndef MAX_NODES_SUPPORTED
+#define MAX_NODES_SUPPORTED 8
+#endif
+
+#ifndef MAX_DIMMS_SUPPORTED
+#define MAX_DIMMS_SUPPORTED 8
+#endif
+
+#ifndef MAX_CS_SUPPORTED
+#define MAX_CS_SUPPORTED 8
+#endif
+
+#ifndef MCT_DIMM_SPARE_NO_WARM
+#define MCT_DIMM_SPARE_NO_WARM 0
+#endif
+
+
+u32 Get_NB32(u32 dev, u32 reg);
+void Set_NB32(u32 dev, u32 reg, u32 val);
+u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index);
+void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data);
+u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index);
+void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data);
+u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val);
+void mct_ForceAutoPrecharge_D(struct DCTStatStruc *pDCTstat, u32 dct);
+u32 Modify_D3CMP(struct DCTStatStruc *pDCTstat, u32 dct, u32 value);
+u8 mct_checkNumberOfDqsRcvEn_1Pass(u8 pass);
+u32 SetupDqsPattern_1PassA(u8 Pass);
+u32 SetupDqsPattern_1PassB(u8 Pass);
+u8 mct_Get_Start_RcvrEnDly_1Pass(u8 Pass);
+u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, u8 RcvrEnDlyLimit, u8 Channel, u8 Receiver, u8 Pass);
+void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+u32 mctGetLogicalCPUID(u32 Node);
+u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void TrainReceiverEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA, u8 Pass);
+void mct_TrainDQSPos_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void TrainMaxReadLatency_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat,struct DCTStatStruc *pDCTstatA);
+void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, u8 FinalValue, u8 Channel, u8 Receiver, u32 dev, u32 index_reg, u8 Addl_Index, u8 Pass);
+void SetEccDQSRcvrEn_D(struct DCTStatStruc *pDCTstat, u8 Channel);
+void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 dct);
+void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct);
+void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct, u32 DramConfigHi);
+void mct_DramInit_Hw_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct);
+void SyncSetting(struct DCTStatStruc *pDCTstat);
+void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+void mct_TrainRcvrEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Pass);
+void mct_EnableDimmEccEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 _DisableDramECC);
+u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val);
+void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct);
+void mctGet_DIMMAddr(struct DCTStatStruc *pDCTstat, u32 node);
+void mctSMBhub_Init(u32 node);
+int mctRead_SPD(u32 smaddr, u32 reg);
+void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void InterleaveChannels_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
+static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 ChipSel);
+void phyAssistedMemFnceTraining(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
+u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass);
+static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request);
+static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_Wait_10ns (u32 cycles);
+u8 mct_RcvrRankEnabled_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Channel, u8 ChipSel);
+u32 mct_GetRcvrSysAddr_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 channel, u8 receiver, u8 *valid);
+void mct_Read1LTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 addr);
+#endif
diff --git a/src/northbridge/amd/amdmct/mct/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h
new file mode 100644
index 0000000000..3f0f2c1351
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h
@@ -0,0 +1,388 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static inline void _WRMSR(u32 addr, u32 lo, u32 hi)
+{
+ __asm__ volatile (
+ "wrmsr"
+ :
+ :"c"(addr),"a"(lo), "d" (hi)
+ );
+}
+
+
+static inline void _RDMSR(u32 addr, u32 *lo, u32 *hi)
+{
+ __asm__ volatile (
+ "rdmsr"
+ :"=a"(*lo), "=d" (*hi)
+ :"c"(addr)
+ );
+}
+
+
+static inline void _RDTSC(u32 *lo, u32 *hi)
+{
+ __asm__ volatile (
+ "rdtsc"
+ : "=a" (*lo), "=d"(*hi)
+ );
+}
+
+
+static inline void _cpu_id(u32 addr, u32 *val)
+{
+ __asm__ volatile(
+ "cpuid"
+ : "=a" (val[0]),
+ "=b" (val[1]),
+ "=c" (val[2]),
+ "=d" (val[3])
+ : "0" (addr));
+
+}
+
+
+static inline u32 bsr(u32 x)
+{
+ u8 i;
+ u32 ret = 0;
+
+ for(i=31; i>0; i--) {
+ if(x & (1<<i)) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+
+}
+
+
+static inline u32 bsf(u32 x)
+{
+ u8 i;
+ u32 ret = 32;
+
+ for(i=0; i<32; i++) {
+ if(x & (1<<i)) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
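+/* For reference: bsr() returns the highest set bit position and bsf() the
+ * lowest, e.g. bsr(0x00A0) == 7, bsf(0x00A0) == 5. bsf(0) returns 32 and
+ * bsr(0) returns 0, so callers are expected not to pass 0. */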
+
+#define _MFENCE asm volatile ( "mfence")
+
+#define _SFENCE asm volatile ( "sfence" )
+
+/* prevent speculative execution of following instructions */
+#define _EXECFENCE asm volatile ("outb %al, $0xed")
+
+static inline u32 read_cr4(void)
+{
+ u32 cr4;
+ __asm__ volatile ("movl %%cr4, %0" : "=r" (cr4));
+ return cr4;
+}
+
+
+static inline void write_cr4(u32 cr4)
+{
+ __asm__ volatile ("movl %0, %%cr4" : : "r" (cr4));
+}
+
+
+u32 SetUpperFSbase(u32 addr_hi);
+
+
+static void proc_CLFLUSH(u32 addr_hi)
+{
+ SetUpperFSbase(addr_hi);
+
+ __asm__ volatile (
+ /* clflush fs:[eax] */
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "clflush %%fs:(%0)\n\t"
+ "mfence\n\t"
+ ::"a" (addr_hi<<8)
+ );
+}
+
+
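+/* Writes line_num 64-byte cache lines of test pattern from buf_a to
+ * fs:addr_lo using non-temporal stores (four 16-byte movntdq stores per
+ * line, hence the line_num * 4 loop count). */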
+static void WriteLNTestPattern(u32 addr_lo, u8 *buf_a, u32 line_num)
+{
+ __asm__ volatile (
+ /*prevent speculative execution of following instructions*/
+ /* FIXME: needed ? */
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "1:\n\t"
+ "movdqa (%3), %%xmm0\n\t"
+ "movntdq %%xmm0, %%fs:(%0)\n\t" /* xmm0 is 128 bit */
+ "addl %1, %0\n\t"
+ "addl %1, %3\n\t"
+ "loop 1b\n\t"
+ "mfence\n\t"
+
+ :: "a" (addr_lo), "d" (16), "c" (line_num * 4), "b"(buf_a)
+ );
+
+}
+
+
+u32 read32_fs(u32 addr_lo)
+{
+ u32 value;
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "movl %%fs:(%1), %0\n\t"
+ :"=b"(value): "a" (addr_lo)
+ );
+ return value;
+}
+
+
+u8 read8_fs(u32 addr_lo)
+{
+ u8 byte;
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "movb %%fs:(%1), %b0\n\t"
+ "mfence\n\t"
+ :"=b"(byte): "a" (addr_lo)
+ );
+ return byte;
+}
+
+
+static void FlushDQSTestPattern_L9(u32 addr_lo)
+{
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "clflush %%fs:-128(%%ecx)\n\t"
+ "clflush %%fs:-64(%%ecx)\n\t"
+ "clflush %%fs:(%%ecx)\n\t"
+ "clflush %%fs:64(%%ecx)\n\t"
+
+ "clflush %%fs:-128(%%eax)\n\t"
+ "clflush %%fs:-64(%%eax)\n\t"
+ "clflush %%fs:(%%eax)\n\t"
+ "clflush %%fs:64(%%eax)\n\t"
+
+ "clflush %%fs:-128(%%ebx)\n\t"
+
+ :: "b" (addr_lo+128+8*64), "c"(addr_lo+128),
+ "a"(addr_lo+128+4*64)
+ );
+
+}
+
+
+static __attribute__((noinline)) void FlushDQSTestPattern_L18(u32 addr_lo)
+{
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "clflush %%fs:-128(%%eax)\n\t"
+ "clflush %%fs:-64(%%eax)\n\t"
+ "clflush %%fs:(%%eax)\n\t"
+ "clflush %%fs:64(%%eax)\n\t"
+
+ "clflush %%fs:-128(%%edi)\n\t"
+ "clflush %%fs:-64(%%edi)\n\t"
+ "clflush %%fs:(%%edi)\n\t"
+ "clflush %%fs:64(%%edi)\n\t"
+
+ "clflush %%fs:-128(%%ebx)\n\t"
+ "clflush %%fs:-64(%%ebx)\n\t"
+ "clflush %%fs:(%%ebx)\n\t"
+ "clflush %%fs:64(%%ebx)\n\t"
+
+ "clflush %%fs:-128(%%ecx)\n\t"
+ "clflush %%fs:-64(%%ecx)\n\t"
+ "clflush %%fs:(%%ecx)\n\t"
+ "clflush %%fs:64(%%ecx)\n\t"
+
+ "clflush %%fs:-128(%%edx)\n\t"
+ "clflush %%fs:-64(%%edx)\n\t"
+
+ :: "b" (addr_lo+128+8*64), "c" (addr_lo+128+12*64),
+ "d" (addr_lo +128+16*64), "a"(addr_lo+128),
+ "D"(addr_lo+128+4*64)
+ );
+}
+
+
+static void ReadL18TestPattern(u32 addr_lo)
+{
+ // set fs and use fs prefix to access the mem
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "movl %%fs:-128(%%esi), %%eax\n\t" //TestAddr cache line
+ "movl %%fs:-64(%%esi), %%eax\n\t" //+1
+ "movl %%fs:(%%esi), %%eax\n\t" //+2
+ "movl %%fs:64(%%esi), %%eax\n\t" //+3
+
+ "movl %%fs:-128(%%edi), %%eax\n\t" //+4
+ "movl %%fs:-64(%%edi), %%eax\n\t" //+5
+ "movl %%fs:(%%edi), %%eax\n\t" //+6
+ "movl %%fs:64(%%edi), %%eax\n\t" //+7
+
+ "movl %%fs:-128(%%ebx), %%eax\n\t" //+8
+ "movl %%fs:-64(%%ebx), %%eax\n\t" //+9
+ "movl %%fs:(%%ebx), %%eax\n\t" //+10
+ "movl %%fs:64(%%ebx), %%eax\n\t" //+11
+
+ "movl %%fs:-128(%%ecx), %%eax\n\t" //+12
+ "movl %%fs:-64(%%ecx), %%eax\n\t" //+13
+ "movl %%fs:(%%ecx), %%eax\n\t" //+14
+ "movl %%fs:64(%%ecx), %%eax\n\t" //+15
+
+ "movl %%fs:-128(%%edx), %%eax\n\t" //+16
+ "movl %%fs:-64(%%edx), %%eax\n\t" //+17
+ "mfence\n\t"
+
+ :: "a"(0), "b" (addr_lo+128+8*64), "c" (addr_lo+128+12*64),
+ "d" (addr_lo +128+16*64), "S"(addr_lo+128),
+ "D"(addr_lo+128+4*64)
+ );
+
+}
+
+
+static void ReadL9TestPattern(u32 addr_lo)
+{
+
+ // set fs and use fs prefix to access the mem
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+
+ "movl %%fs:-128(%%ecx), %%eax\n\t" //TestAddr cache line
+ "movl %%fs:-64(%%ecx), %%eax\n\t" //+1
+ "movl %%fs:(%%ecx), %%eax\n\t" //+2
+ "movl %%fs:64(%%ecx), %%eax\n\t" //+3
+
+ "movl %%fs:-128(%%edx), %%eax\n\t" //+4
+ "movl %%fs:-64(%%edx), %%eax\n\t" //+5
+ "movl %%fs:(%%edx), %%eax\n\t" //+6
+ "movl %%fs:64(%%edx), %%eax\n\t" //+7
+
+ "movl %%fs:-128(%%ebx), %%eax\n\t" //+8
+ "mfence\n\t"
+
+ :: "a"(0), "b" (addr_lo+128+8*64), "c"(addr_lo+128),
+ "d"(addr_lo+128+4*64)
+ );
+
+}
+
+
+static void ReadMaxRdLat1CLTestPattern_D(u32 addr)
+{
+ SetUpperFSbase(addr);
+
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "movl %%fs:-128(%%esi), %%eax\n\t" //TestAddr cache line
+ "movl %%fs:-64(%%esi), %%eax\n\t" //+1
+ "movl %%fs:(%%esi), %%eax\n\t" //+2
+ "mfence\n\t"
+ :: "a"(0), "S"((addr<<8)+128)
+ );
+
+}
+
+
+void WriteMaxRdLat1CLTestPattern_D(u32 buf, u32 addr)
+{
+ SetUpperFSbase(addr);
+
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "1:\n\t"
+ "movdqa (%3), %%xmm0\n\t"
+ "movntdq %%xmm0, %%fs:(%0)\n\t" /* xmm0 is 128 bit */
+ "addl %1, %0\n\t"
+ "addl %1, %3\n\t"
+ "loop 1b\n\t"
+ "mfence\n\t"
+
+ :: "a" (addr<<8), "d" (16), "c" (3 * 4), "b"(buf)
+ );
+}
+
+
+static void FlushMaxRdLatTestPattern_D(u32 addr)
+{
+ /* Flush a pattern of 72 bit times (per DQ) from cache.
+ * This procedure is used to ensure cache miss on the next read training.
+ */
+
+ SetUpperFSbase(addr);
+
+ __asm__ volatile (
+ "outb %%al, $0xed\n\t" /* _EXECFENCE */
+ "clflush %%fs:-128(%%esi)\n\t" //TestAddr cache line
+ "clflush %%fs:-64(%%esi)\n\t" //+1
+ "clflush %%fs:(%%esi)\n\t" //+2
+ "mfence\n\t"
+
+ :: "S"((addr<<8)+128)
+ );
+}
+
+
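+/* Assembles a little-endian u32 from four consecutive bytes; used to pull
+ * the DWORD fields out of the byte tables in mctardk3.c/mctardk4.c, e.g.
+ * {0x20, 0x22, 0x13, 0x11} yields 0x11132220. */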
+u32 stream_to_int(u8 *p)
+{
+ int i;
+ u32 val;
+ u32 valx;
+
+ val = 0;
+
+ for(i=3; i>=0; i--) {
+ val <<= 8;
+ valx = *(p+i);
+ val |= valx;
+ }
+
+ return val;
+}
+
+
+void oemSet_NB32(u32 addr, u32 val, u8 *valid)
+{
+}
+
+
+u32 oemGet_NB32(u32 addr, u8 *valid)
+{
+ *valid = 0;
+ return 0xffffffff;
+}
+
+
+u8 oemNodePresent_D(u8 Node, u8 *ret)
+{
+ *ret = 0;
+ return 0;
+}
diff --git a/src/northbridge/amd/amdmct/mct/mct_fd.c b/src/northbridge/amd/amdmct/mct/mct_fd.c
new file mode 100644
index 0000000000..b13467918e
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mct_fd.c
@@ -0,0 +1,25 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+u8 amd_FD_support(void)
+{
+ return 1;
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctardk3.c b/src/northbridge/amd/amdmct/mct/mctardk3.c
new file mode 100644
index 0000000000..7300a99aaf
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctardk3.c
@@ -0,0 +1,206 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+ u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL);
+
+
+void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 dct)
+{
+ u16 val, valx;
+
+ print_tx("dct: ", dct);
+ print_tx("Speed: ", pDCTstat->Speed);
+
+ Get_ChannelPS_Cfg0_D(pDCTstat->MAdimms[dct], pDCTstat->Speed,
+ pDCTstat->MAload[dct], pDCTstat->DATAload[dct],
+ &(pDCTstat->CH_ADDR_TMG[dct]), &(pDCTstat->CH_ODC_CTL[dct]));
+
+
+ if(pDCTstat->MAdimms[dct] == 1)
+ pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 75ohms */
+ else
+ pDCTstat->CH_ODC_CTL[dct] |= 0x10000000; /* 150ohms */
+
+ pDCTstat->_2Tmode = 1;
+
+ /* use byte lane 4 delay for ECC lane */
+ pDCTstat->CH_EccDQSLike[0] = 0x0504;
+ pDCTstat->CH_EccDQSScale[0] = 0; /* 100% byte lane 4 */
+ pDCTstat->CH_EccDQSLike[1] = 0x0504;
+ pDCTstat->CH_EccDQSScale[1] = 0; /* 100% byte lane 4 */
+
+
+ /*
+ Overrides and/or exceptions
+ */
+
+ /* 1) QRx4 needs to adjust CS/ODT setup time */
+ // FIXME: Add Ax support?
+ if (mctGet_NVbits(NV_MAX_DIMMS) == 4) {
+ if (pDCTstat->DimmQRPresent != 0) {
+ pDCTstat->CH_ADDR_TMG[dct] &= 0xFF00FFFF;
+ pDCTstat->CH_ADDR_TMG[dct] |= 0x00000000;
+ if (pDCTstat->MAdimms[dct] == 4) {
+ pDCTstat->CH_ADDR_TMG[dct] &= 0xFF00FFFF;
+ pDCTstat->CH_ADDR_TMG[dct] |= 0x002F0000;
+ if (pDCTstat->Speed == 3 || pDCTstat->Speed == 4) {
+ pDCTstat->CH_ADDR_TMG[dct] &= 0xFF00FFFF;
+ pDCTstat->CH_ADDR_TMG[dct] |= 0x00002F00;
+ if (pDCTstat->MAdimms[dct] == 4)
+ pDCTstat->CH_ODC_CTL[dct] = 0x00331222;
+ }
+ }
+ }
+ }
+
+
+ /* 2) DRx4 (R/C-J) @ DDR667 needs to adjust CS/ODT setup time */
+ if (pDCTstat->Speed == 3 || pDCTstat->Speed == 4) {
+ val = pDCTstat->Dimmx4Present;
+ if (dct == 0) {
+ val &= 0x55;
+ } else {
+ val &= 0xAA;
+ val >>= 1;
+ }
+ val &= pDCTstat->DIMMValid;
+ if (val) {
+ //FIXME: skip for Ax
+ valx = pDCTstat->DimmDRPresent;
+ if (dct == 0) {
+ valx &= 0x55;
+ } else {
+ valx &= 0xAA;
+ valx >>= 1;
+ }
+ if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
+ val &= valx;
+ if (val != 0) {
+ pDCTstat->CH_ADDR_TMG[dct] &= 0xFFFF00FF;
+ pDCTstat->CH_ADDR_TMG[dct] |= 0x00002F00;
+ }
+ } else {
+ val &= valx;
+ if (val != 0) {
+					if (pDCTstat->Speed == 3 || pDCTstat->Speed == 4) {
+ pDCTstat->CH_ADDR_TMG[dct] &= 0xFFFF00FF;
+ pDCTstat->CH_ADDR_TMG[dct] |= 0x00002F00;
+ }
+ }
+
+ }
+ }
+ }
+
+
+ pDCTstat->CH_ODC_CTL[dct] = procOdtWorkaround(pDCTstat, dct, pDCTstat->CH_ODC_CTL[dct]);
+
+ print_tx("CH_ODC_CTL: ", pDCTstat->CH_ODC_CTL[dct]);
+ print_tx("CH_ADDR_TMG: ", pDCTstat->CH_ADDR_TMG[dct]);
+
+
+}
+
+
+/*===============================================================================
+ * Vendor is responsible for correct settings.
+ * M2/Unbuffered 4 Slot - AMD Design Guideline.
+ *===============================================================================
+ * #1, BYTE, Speed (DCTStatStruc.Speed) (Secondary Key)
+ * #2, BYTE, number of Address bus loads on the Channel. (Tertiary Key)
+ *	These must be listed in ascending order.
+ *	FEh (0xFE) has the special meaning of 'any', and must be listed first for each speed grade.
+ * #3, DWORD, Address Timing Control Register Value
+ * #4, DWORD, Output Driver Compensation Control Register Value
+ * #5, BYTE, Number of DIMMs (Primary Key)
+ */
+static const u8 Table_ATC_ODC_8D_D[] = {
+ 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x22, 0x12, 0x11, 0x00, 1,
+ 0xFE, 0xFF, 0x00, 0x00, 0x37, 0x00, 0x22, 0x12, 0x11, 0x00, 2,
+ 1, 0xFF, 0x00, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 3,
+ 2, 0xFF, 0x00, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 3,
+ 3, 0xFF, 0x2F, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 3,
+ 4, 0xFF, 0x2F, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x33, 0x00, 3,
+ 1, 0xFF, 0x00, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 4,
+ 2, 0xFF, 0x00, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 4,
+ 3, 0xFF, 0x2F, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x33, 0x00, 4,
+ 4, 0xFF, 0x2F, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x33, 0x00, 4,
+ 0xFF
+};
+
+static const u8 Table_ATC_ODC_4D_D[] = {
+ 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x22, 0x12, 0x11, 0x00, 1,
+ 0xFE, 0xFF, 0x00, 0x00, 0x37, 0x00, 0x22, 0x12, 0x11, 0x00, 2,
+ 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x22, 0x12, 0x11, 0x00, 3,
+ 0xFE, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x22, 0x12, 0x11, 0x00, 4,
+ 0xFF
+};
+
+static const u8 Table_ATC_ODC_8D_D_Ax[] = {
+ 1,0xff,0x00,0x00,0x2F,0x0,0x22,0x12,0x11,0x00, 0xFE,
+ 2,0xff,0x00,0x00,0x2C,0x0,0x22,0x12,0x11,0x00, 0xFE,
+ 3,0xff,0x00,0x00,0x2C,0x0,0x22,0x12,0x11,0x00, 0xFE,
+ 4,0xff,0x00,0x33,0x2F,0x0,0x22,0x12,0x11,0x00, 0xFE,
+ 0xFF
+};
+
+static const u8 Table_ATC_ODC_4D_D_Ax[] = {
+ 1,0xff,0x00,0x00,0x2F,0x00,0x22,0x12,0x11,0x00, 0xFE,
+ 2,0xff,0x00,0x2C,0x2C,0x00,0x22,0x12,0x11,0x00, 0xFE,
+ 3,0xff,0x00,0x00,0x2C,0x00,0x22,0x12,0x11,0x00, 0xFE,
+ 4,0xff,0x00,0x33,0x2F,0x00,0x22,0x12,0x11,0x00, 0xFE,
+ 0xFF
+};
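+/* Worked example: the Table_ATC_ODC_8D_D row
+ * "3, 0xFF, 0x2F, 0x00, 0x2F, 0x00, 0x22, 0x12, 0x11, 0x00, 3" means
+ * Speed 3, any MAA load, AddrTmgCTL = 0x002F002F, ODC_CTL = 0x00111222,
+ * 3 DIMMs; bytes 2..5 and 6..9 are decoded by stream_to_int() in
+ * Get_ChannelPS_Cfg0_D() below. */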
+
+
+static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
+ u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL)
+{
+ const u8 *p;
+
+ *AddrTmgCTL = 0;
+ *ODC_CTL = 0;
+
+ if(mctGet_NVbits(NV_MAX_DIMMS) == 8) {
+ /* 8 DIMM Table */
+ p = Table_ATC_ODC_8D_D;
+ //FIXME Add Ax support
+ } else {
+ /* 4 DIMM Table*/
+ p = Table_ATC_ODC_4D_D;
+ //FIXME Add Ax support
+ }
+
+ while (*p != 0xFF) {
+ if ((MAAdimms == *(p+10)) || (*(p+10 ) == 0xFE)) {
+ if((*p == Speed) || (*p == 0xFE)) {
+ if(MAAload <= *(p+1)) {
+ *AddrTmgCTL = stream_to_int((u8*)(p+2));
+ *ODC_CTL = stream_to_int((u8*)(p+6));
+ break;
+ }
+ }
+ }
+ p+=11;
+ }
+}
+
diff --git a/src/northbridge/amd/amdmct/mct/mctardk4.c b/src/northbridge/amd/amdmct/mct/mctardk4.c
new file mode 100644
index 0000000000..c3f1aeab5a
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctardk4.c
@@ -0,0 +1,172 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+ u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL,
+		u8 *CMDmode);
+
+
+void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat,
+		struct DCTStatStruc *pDCTstat, u32 dct)
+{
+ print_tx("dct: ", dct);
+ print_tx("Speed: ", pDCTstat->Speed);
+
+ Get_ChannelPS_Cfg0_D(pDCTstat->MAdimms[dct], pDCTstat->Speed,
+ pDCTstat->MAload[dct], pDCTstat->DATAload[dct],
+ &(pDCTstat->CH_ADDR_TMG[dct]), &(pDCTstat->CH_ODC_CTL[dct]),
+ &pDCTstat->_2Tmode);
+
+// print_tx("1 CH_ODC_CTL: ", pDCTstat->CH_ODC_CTL[dct]);
+// print_tx("1 CH_ADDR_TMG: ", pDCTstat->CH_ADDR_TMG[dct]);
+
+ if(pDCTstat->MAdimms[dct] == 1)
+ pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 75ohms */
+ else
+ pDCTstat->CH_ODC_CTL[dct] |= 0x10000000; /* 150ohms */
+
+
+ /*
+ * Overrides and/or workarounds
+ */
+ pDCTstat->CH_ODC_CTL[dct] = procOdtWorkaround(pDCTstat, dct, pDCTstat->CH_ODC_CTL[dct]);
+
+ print_tx("4 CH_ODC_CTL: ", pDCTstat->CH_ODC_CTL[dct]);
+ print_tx("4 CH_ADDR_TMG: ", pDCTstat->CH_ADDR_TMG[dct]);
+}
+
+/*=============================================================================
+ * Vendor is responsible for correct settings.
+ * M2/Unbuffered 4 Slot - AMD Design Guideline.
+ *=============================================================================
+ * #1, BYTE, Speed (DCTStatStruc.Speed)
+ * #2, BYTE, number of Address bus loads on the Channel.
+ *	These must be listed in ascending order.
+ *	FFh (-1) has the special meaning of 'any', and must be listed first for
+ * each speed grade.
+ * #3, DWORD, Address Timing Control Register Value
+ * #4, DWORD, Output Driver Compensation Control Register Value
+ */
+
+static const u8 Table_ATC_ODC_D_Bx[] = {
+	1, 0xFF, 0x00, 0x2F, 0x2F, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 12, 0x00, 0x2F, 0x2F, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 16, 0x00, 0x2F, 0x00, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 20, 0x00, 0x2F, 0x38, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 24, 0x00, 0x2F, 0x37, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 32, 0x00, 0x2F, 0x34, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 12, 0x20, 0x22, 0x20, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 16, 0x20, 0x22, 0x30, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 20, 0x20, 0x22, 0x2C, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 24, 0x20, 0x22, 0x2A, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 32, 0x20, 0x22, 0x2B, 0x0, 0x22, 0x13, 0x11, 0x0,
+	4, 0xFF, 0x20, 0x25, 0x20, 0x0, 0x22, 0x33, 0x11, 0x0,
+	5, 0xFF, 0x20, 0x20, 0x2F, 0x0, 0x22, 0x32, 0x11, 0x0,
+	0xFF
+};
+
+static const u8 Table_ATC_ODC_D_Ax[] = {
+	1, 0xFF, 0x00, 0x2F, 0x2F, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 12, 0x00, 0x2F, 0x2F, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 16, 0x00, 0x2F, 0x00, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 20, 0x00, 0x2F, 0x38, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 24, 0x00, 0x2F, 0x37, 0x0, 0x22, 0x13, 0x11, 0x0,
+	2, 32, 0x00, 0x2F, 0x34, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 12, 0x20, 0x22, 0x20, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 16, 0x20, 0x22, 0x30, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 20, 0x20, 0x22, 0x2C, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 24, 0x20, 0x22, 0x2A, 0x0, 0x22, 0x13, 0x11, 0x0,
+	3, 32, 0x20, 0x22, 0x2B, 0x0, 0x22, 0x13, 0x11, 0x0,
+	4, 0xFF, 0x20, 0x25, 0x20, 0x0, 0x22, 0x33, 0x11, 0x0,
+	5, 0xFF, 0x20, 0x20, 0x2F, 0x0, 0x22, 0x32, 0x11, 0x0,
+ 0xFF
+};
+
+
+static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+ u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL,
+			u32 *CMDmode)
+{
+	const u8 *p;
+
+ *AddrTmgCTL = 0;
+ *ODC_CTL = 0;
+ *CMDmode = 1;
+
+ // FIXME: add Ax support
+ if(MAAdimms == 0) {
+ *ODC_CTL = 0x00111222;
+ if(Speed == 3)
+ *AddrTmgCTL = 0x00202220;
+ else if (Speed == 2)
+ *AddrTmgCTL = 0x002F2F00;
+ else if (Speed == 1)
+ *AddrTmgCTL = 0x002F2F00;
+ else if (Speed == 4)
+ *AddrTmgCTL = 0x00202520;
+		else if (Speed == 5)
+ *AddrTmgCTL = 0x002F2020;
+ else
+ *AddrTmgCTL = 0x002F2F2F;
+ } else if(MAAdimms == 1) {
+ if(Speed == 4) {
+ *CMDmode = 2;
+ *AddrTmgCTL = 0x00202520;
+ *ODC_CTL = 0x00113222;
+		} else if(Speed == 5) {
+ *CMDmode = 2;
+ *AddrTmgCTL = 0x002F2020;
+ *ODC_CTL = 0x00113222;
+ } else {
+ *CMDmode = 1;
+ *ODC_CTL = 0x00111222;
+ if(Speed == 3) {
+ *AddrTmgCTL = 0x00202220;
+ } else if(Speed == 2) {
+ if (MAAload == 4)
+ *AddrTmgCTL = 0x002B2F00;
+ else if (MAAload == 16)
+ *AddrTmgCTL = 0x002B2F00;
+ else if (MAAload == 8)
+ *AddrTmgCTL = 0x002F2F00;
+ else
+ *AddrTmgCTL = 0x002F2F00;
+ } else if(Speed == 1) {
+ *AddrTmgCTL = 0x002F2F00;
+ } else if(Speed == 5) {
+ *AddrTmgCTL = 0x002F2020;
+ } else {
+ *AddrTmgCTL = 0x002F2F2F;
+ }
+ }
+ } else {
+ *CMDmode = 2;
+		p = Table_ATC_ODC_D_Bx;
+		do {
+			if(Speed == *p) {
+				if(MAAload <= *(p+1)) {
+					*AddrTmgCTL = stream_to_int((u8 *)(p+2));
+					*ODC_CTL = stream_to_int((u8 *)(p+6));
+					break;
+				}
+			}
+			p+=10;
+		} while (*p != 0xFF);
+	}
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctchi_d.c b/src/northbridge/amd/amdmct/mct/mctchi_d.c
new file mode 100644
index 0000000000..f50fd6d62e
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctchi_d.c
@@ -0,0 +1,130 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+
+ u8 Node;
+ u32 DramBase, DctSelBase;
+ u8 DctSelIntLvAddr, DctSelHi;
+ u8 HoleValid = 0;
+ u32 HoleSize, HoleBase = 0;
+ u32 val, tmp;
+ u32 dct0_size, dct1_size;
+ u8 enabled;
+ struct DCTStatStruc *pDCTstat;
+
+	/* HoleValid - set when the current Node contains a memory hole.
+	 * HoleSize - size of the IO hole in the overall system memory map,
+	 * or 0 if there is none.
+	 */
+
+ /* call back to wrapper not needed ManualChannelInterleave_D(); */
+ /* call back - DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv);*/ /* override interleave */
+ // FIXME: Check for Cx
+ DctSelIntLvAddr = 5; /* use default: Enable channel interleave */
+ enabled = 1; /* with Hash*: exclusive OR of address bits[20:16, 6]. */
+ beforeInterleaveChannels_D(pDCTstatA, &enabled);
+
+ if (enabled) {
+ DctSelIntLvAddr >>= 1;
+ HoleSize = 0;
+ if ((pMCTstat->GStatus & (1 << GSB_SoftHole)) ||
+ (pMCTstat->GStatus & (1 << GSB_HWHole))) {
+ if (pMCTstat->HoleBase) {
+ HoleBase = pMCTstat->HoleBase >> 8;
+ HoleSize = HoleBase & 0xFFFF0000;
+ HoleSize |= ((~HoleBase) + 1) & 0xFFFF;
+ }
+ }
+ Node = 0;
+ while (Node < MAX_NODES_SUPPORTED) {
+ pDCTstat = pDCTstatA + Node;
+ val = Get_NB32(pDCTstat->dev_map, 0xF0);
+ if (val & (1 << DramHoleValid))
+ HoleValid = 1;
+ if (!pDCTstat->GangedMode && pDCTstat->DIMMValidDCT[0] && pDCTstat->DIMMValidDCT[1]) {
+ DramBase = pDCTstat->NodeSysBase >> 8;
+ dct1_size = ((pDCTstat->NodeSysLimit) + 2) >> 8;
+ dct0_size = Get_NB32(pDCTstat->dev_dct, 0x114);
+ if (dct0_size >= 0x10000) {
+ dct0_size -= HoleSize;
+ }
+
+ dct0_size -= DramBase;
+ dct1_size -= dct0_size;
+ DctSelHi = 0x05; /* DctSelHiRngEn = 1, DctSelHi = 0 */
+ if (dct1_size == dct0_size) {
+ dct1_size = 0;
+ DctSelHi = 0x04; /* DctSelHiRngEn = 0 */
+ } else if (dct1_size > dct0_size ) {
+ dct1_size = dct0_size;
+ DctSelHi = 0x07; /* DctSelHiRngEn = 1, DctSelHi = 1 */
+ }
+ dct0_size = dct1_size;
+ dct0_size += DramBase;
+ dct0_size += dct1_size;
+ if (dct0_size >= HoleBase) /* if DctSelBaseAddr > HoleBase */
+ dct0_size += HoleBase;
+ DctSelBase = dct0_size;
+
+ if (dct1_size == 0)
+ dct0_size = 0;
+ dct0_size -= dct1_size; /* DctSelBaseOffset = DctSelBaseAddr - Interleaved region */
+ Set_NB32(pDCTstat->dev_dct, 0x114, dct0_size);
+
+ if (dct1_size == 0)
+ dct1_size = DctSelBase;
+ val = Get_NB32(pDCTstat->dev_dct, 0x110);
+ val &= 0x7F8;
+ val |= dct1_size;
+ val |= DctSelHi;
+ val |= (DctSelIntLvAddr << 6) & 0xFF;
+ Set_NB32(pDCTstat->dev_dct, 0x110, val);
+ print_tx("InterleaveChannels: DRAM Controller Select Low Register = ", val);
+
+ if (HoleValid) {
+ tmp = DramBase;
+ val = DctSelBase;
+ if (val < HoleBase) { /* DctSelBaseAddr < DramHoleBase */
+ val -= DramBase;
+ val >>= 1;
+ tmp += val;
+ }
+ tmp += HoleSize;
+ val = Get_NB32(pDCTstat->dev_map, 0xF0); /* DramHoleOffset */
+ val &= 0x7F;
+ val |= (tmp & 0xFF);
+ Set_NB32(pDCTstat->dev_map, 0xF0, val);
+					print_tx("InterleaveChannels:0xF0 = ", val);
+
+ }
+ }
+ print_tx("InterleaveChannels_D: Node ", Node);
+ print_tx("InterleaveChannels_D: Status ", pDCTstat->Status);
+ print_tx("InterleaveChannels_D: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("InterleaveChannels_D: ErrCode ", pDCTstat->ErrCode);
+ Node++;
+ }
+ }
+ print_t("InterleaveChannels_D: Done\n");
+}
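+
+/* Illustrative sketch only (semantics assumed from the in-code comment
+ * "exclusive OR of address bits[20:16, 6]"): with DctSelIntLvAddr programmed
+ * for hash interleave as above, the DCT that services an interleaved address
+ * can be modelled on the low 32 address bits as:
+ *
+ *	static u8 hash_dct_select(u32 addr)
+ *	{
+ *		u8 bit = (addr >> 6) & 1;
+ *		bit ^= (addr >> 16) & 1;
+ *		bit ^= (addr >> 17) & 1;
+ *		bit ^= (addr >> 18) & 1;
+ *		bit ^= (addr >> 19) & 1;
+ *		bit ^= (addr >> 20) & 1;
+ *		return bit;	// 0 = DCT0, 1 = DCT1
+ *	}
+ */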
diff --git a/src/northbridge/amd/amdmct/mct/mctcsi_d.c b/src/northbridge/amd/amdmct/mct/mctcsi_d.c
new file mode 100644
index 0000000000..f2f5cedada
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctcsi_d.c
@@ -0,0 +1,147 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/* Low swap bit vs bank size encoding (physical, not logical address bit)
+ * ;To calculate the number by hand, add the number of Bank address bits
+ * ;(2 or 3) to the number of column address bits, plus 3 (the logical
+ * ;page size), and subtract 8.
+ */
+static const u8 Tab_int_D[] = { 6,7,7,8,8,8,8,8,9,9,8,9 };
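+
+/* Example of the hand calculation described above: a chip-select with 2 bank
+ * address bits and 9 column bits gives 2 + 9 + 3 - 8 = 6 (the smallest value
+ * in the table); 3 bank bits and 10 column bits give 3 + 10 + 3 - 8 = 8.
+ */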
+
+void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u8 ChipSel, EnChipSels;
+ u32 AddrLoMask, AddrHiMask;
+ u32 AddrLoMaskN, AddrHiMaskN, MemSize = 0;
+ u8 DoIntlv, _CsIntCap;
+ u32 BitDelta, BankEncd = 0;
+
+ u32 dev;
+ u32 reg;
+ u32 reg_off;
+ u32 val;
+ u32 val_lo, val_hi;
+
+ DoIntlv = mctGet_NVbits(NV_BankIntlv);
+ _CsIntCap = 0;
+ EnChipSels = 0;
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+
+ ChipSel = 0; /* Find out if current configuration is capable */
+ while (DoIntlv && (ChipSel < MAX_CS_SUPPORTED)) {
+ reg = 0x40+(ChipSel<<2) + reg_off; /* Dram CS Base 0 */
+ val = Get_NB32(dev, reg);
+ if ( val & (1<<CSEnable)) {
+ EnChipSels++;
+ reg = 0x60+((ChipSel>>1)<<2)+reg_off; /*Dram CS Mask 0 */
+ val = Get_NB32(dev, reg);
+ val >>= 19;
+ val &= 0x3ff;
+ val++;
+ if (EnChipSels == 1)
+ MemSize = val;
+ else
+ /*If mask sizes not same then skip */
+ if (val != MemSize)
+ break;
+ reg = 0x80 + reg_off; /*Dram Bank Addressing */
+ val = Get_NB32(dev, reg);
+ val >>= (ChipSel>>1)<<2;
+ val &= 0x0f;
+ if(EnChipSels == 1)
+ BankEncd = val;
+ else
+ /*If number of Rows/Columns not equal, skip */
+ if (val != BankEncd)
+ break;
+ }
+ ChipSel++;
+ }
+ if (ChipSel == MAX_CS_SUPPORTED) {
+ if ((EnChipSels == 2) || (EnChipSels == 4) || (EnChipSels == 8))
+ _CsIntCap = 1;
+ }
+
+ if (DoIntlv) {
+ if(!_CsIntCap) {
+ pDCTstat->ErrStatus |= 1<<SB_BkIntDis;
+ DoIntlv = 0;
+ }
+ }
+
+ if(DoIntlv) {
+ val = Tab_int_D[BankEncd];
+ if (pDCTstat->Status & (1<<SB_128bitmode))
+ val++;
+
+ AddrLoMask = (EnChipSels - 1) << val;
+ AddrLoMaskN = ~AddrLoMask;
+
+ val = bsf(MemSize) + 19;
+ AddrHiMask = (EnChipSels -1) << val;
+ AddrHiMaskN = ~AddrHiMask;
+
+ BitDelta = bsf(AddrHiMask) - bsf(AddrLoMask);
+
+ for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel++) {
+ reg = 0x40+(ChipSel<<2) + reg_off; /*Dram CS Base 0 */
+ val = Get_NB32(dev, reg);
+ if (val & 3) {
+ val_lo = val & AddrLoMask;
+ val_hi = val & AddrHiMask;
+ val &= AddrLoMaskN;
+ val &= AddrHiMaskN;
+ val_lo <<= BitDelta;
+ val_hi >>= BitDelta;
+ val |= val_lo;
+ val |= val_hi;
+ Set_NB32(dev, reg, val);
+
+ if(ChipSel & 1)
+ continue;
+
+ reg = 0x60 + ((ChipSel>>1)<<2) + reg_off; /*Dram CS Mask 0 */
+ val = Get_NB32(dev, reg);
+ val_lo = val & AddrLoMask;
+ val_hi = val & AddrHiMask;
+ val &= AddrLoMaskN;
+ val &= AddrHiMaskN;
+ val_lo <<= BitDelta;
+ val_hi >>= BitDelta;
+ val |= val_lo;
+ val |= val_hi;
+ Set_NB32(dev, reg, val);
+ }
+ }
+ print_t("InterleaveBanks_D: Banks Interleaved ");
+ } /* DoIntlv */
+
+// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
+
+ print_tx("InterleaveBanks_D: Status ", pDCTstat->Status);
+ print_tx("InterleaveBanks_D: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("InterleaveBanks_D: ErrCode ", pDCTstat->ErrCode);
+ print_t("InterleaveBanks_D: Done\n");
+}
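+
+/* Worked example of the bit swap above (hypothetical masks): with two enabled
+ * chip-selects the masks are single bits, e.g. AddrLoMask = 0x00000100 and
+ * AddrHiMask = 0x01000000, so BitDelta = 24 - 8 = 16. A register value with
+ * bit 24 set and bit 8 clear then has its fields exchanged:
+ *
+ *	val_lo = val & 0x00000100;	// 0
+ *	val_hi = val & 0x01000000;	// 0x01000000
+ *	val   &= ~0x00000100;
+ *	val   &= ~0x01000000;
+ *	val   |= val_lo << 16;		// nothing to move up
+ *	val   |= val_hi >> 16;		// bit 24 moves down to bit 8
+ */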
+
+
diff --git a/src/northbridge/amd/amdmct/mct/mctdqs_d.c b/src/northbridge/amd/amdmct/mct/mctdqs_d.c
new file mode 100644
index 0000000000..ae87966367
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctdqs_d.c
@@ -0,0 +1,1216 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u16 like,
+ u8 scale, u8 ChipSel);
+static void GetDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 ChipSel);
+static u8 MiddleDQS_D(u8 min, u8 max);
+static void TrainReadDQS_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start);
+static void TrainWriteDQS_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start);
+static void WriteDQSTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo);
+static void WriteL18TestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo);
+static void WriteL9TestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo);
+static u8 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 addr_lo);
+static void FlushDQSTestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 addr_lo);
+static void SetTargetWTIO_D(u32 TestAddr);
+static void ResetTargetWTIO_D(void);
+static void ReadDQSTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo);
+void ResetDCTWrPtr_D(u32 dev, u32 index_reg, u32 index);
+u8 mct_DisableDimmEccEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_SetDQSDelayCSR_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 ChipSel);
+static void mct_SetDQSDelayAllCSR_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start);
+u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel,
+ u8 receiver, u8 *valid);
+static void SetupDqsPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 *buffer);
+
+#define DQS_TRAIN_DEBUG 0
+
+static inline void print_debug_dqs(const char *str, u32 val, u8 level)
+{
+#if DQS_TRAIN_DEBUG > 0
+ if (DQS_TRAIN_DEBUG >= level) {
+ printk_debug("%s%x\n", str, val);
+ }
+#endif
+}
+
+static inline void print_debug_dqs_pair(const char *str, u32 val, const char *str2, u32 val2, u8 level)
+{
+#if DQS_TRAIN_DEBUG > 0
+ if (DQS_TRAIN_DEBUG >= level) {
+ printk_debug("%s%08x%s%08x\n", str, val, str2, val2);
+ }
+#endif
+}
+
+/*Warning: These must be located so they do not cross a logical 16-bit segment boundary!*/
+const static u32 TestPatternJD1a_D[] = {
+ 0x00000000,0x00000000,0xFFFFFFFF,0xFFFFFFFF, /* QW0-1, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW2-3, ALL-EVEN */
+ 0x00000000,0x00000000,0xFFFFFFFF,0xFFFFFFFF, /* QW4-5, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW6-7, ALL-EVEN */
+ 0xFeFeFeFe,0xFeFeFeFe,0x01010101,0x01010101, /* QW0-1, DQ0-ODD */
+ 0xFeFeFeFe,0xFeFeFeFe,0x01010101,0x01010101, /* QW2-3, DQ0-ODD */
+ 0x01010101,0x01010101,0xFeFeFeFe,0xFeFeFeFe, /* QW4-5, DQ0-ODD */
+ 0xFeFeFeFe,0xFeFeFeFe,0x01010101,0x01010101, /* QW6-7, DQ0-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW0-1, DQ1-ODD */
+ 0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd, /* QW2-3, DQ1-ODD */
+ 0xFdFdFdFd,0xFdFdFdFd,0x02020202,0x02020202, /* QW4-5, DQ1-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW6-7, DQ1-ODD */
+ 0x04040404,0x04040404,0xfBfBfBfB,0xfBfBfBfB, /* QW0-1, DQ2-ODD */
+ 0x04040404,0x04040404,0x04040404,0x04040404, /* QW2-3, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW4-5, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW6-7, DQ2-ODD */
+ 0x08080808,0x08080808,0xF7F7F7F7,0xF7F7F7F7, /* QW0-1, DQ3-ODD */
+ 0x08080808,0x08080808,0x08080808,0x08080808, /* QW2-3, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0x08080808,0x08080808, /* QW4-5, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7, /* QW6-7, DQ3-ODD */
+ 0x10101010,0x10101010,0x10101010,0x10101010, /* QW0-1, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0x10101010,0x10101010, /* QW2-3, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF, /* QW4-5, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0x10101010,0x10101010, /* QW6-7, DQ4-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW0-1, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0x20202020,0x20202020, /* QW2-3, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW4-5, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW6-7, DQ5-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW0-1, DQ6-ODD */
+ 0x40404040,0x40404040,0xBfBfBfBf,0xBfBfBfBf, /* QW2-3, DQ6-ODD */
+ 0x40404040,0x40404040,0xBfBfBfBf,0xBfBfBfBf, /* QW4-5, DQ6-ODD */
+ 0x40404040,0x40404040,0xBfBfBfBf,0xBfBfBfBf, /* QW6-7, DQ6-ODD */
+ 0x80808080,0x80808080,0x7F7F7F7F,0x7F7F7F7F, /* QW0-1, DQ7-ODD */
+ 0x80808080,0x80808080,0x7F7F7F7F,0x7F7F7F7F, /* QW2-3, DQ7-ODD */
+ 0x80808080,0x80808080,0x7F7F7F7F,0x7F7F7F7F, /* QW4-5, DQ7-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080 /* QW6-7, DQ7-ODD */
+};
+const static u32 TestPatternJD1b_D[] = {
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW0,CHA-B, ALL-EVEN */
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF, /* QW1,CHA-B, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW2,CHA-B, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW3,CHA-B, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW4,CHA-B, ALL-EVEN */
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF, /* QW5,CHA-B, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW6,CHA-B, ALL-EVEN */
+ 0x00000000,0x00000000,0x00000000,0x00000000, /* QW7,CHA-B, ALL-EVEN */
+ 0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe, /* QW0,CHA-B, DQ0-ODD */
+ 0x01010101,0x01010101,0x01010101,0x01010101, /* QW1,CHA-B, DQ0-ODD */
+ 0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe, /* QW2,CHA-B, DQ0-ODD */
+ 0x01010101,0x01010101,0x01010101,0x01010101, /* QW3,CHA-B, DQ0-ODD */
+ 0x01010101,0x01010101,0x01010101,0x01010101, /* QW4,CHA-B, DQ0-ODD */
+ 0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe, /* QW5,CHA-B, DQ0-ODD */
+ 0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe,0xFeFeFeFe, /* QW6,CHA-B, DQ0-ODD */
+ 0x01010101,0x01010101,0x01010101,0x01010101, /* QW7,CHA-B, DQ0-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW0,CHA-B, DQ1-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW1,CHA-B, DQ1-ODD */
+ 0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd, /* QW2,CHA-B, DQ1-ODD */
+ 0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd, /* QW3,CHA-B, DQ1-ODD */
+ 0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd,0xFdFdFdFd, /* QW4,CHA-B, DQ1-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW5,CHA-B, DQ1-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW6,CHA-B, DQ1-ODD */
+ 0x02020202,0x02020202,0x02020202,0x02020202, /* QW7,CHA-B, DQ1-ODD */
+ 0x04040404,0x04040404,0x04040404,0x04040404, /* QW0,CHA-B, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW1,CHA-B, DQ2-ODD */
+ 0x04040404,0x04040404,0x04040404,0x04040404, /* QW2,CHA-B, DQ2-ODD */
+ 0x04040404,0x04040404,0x04040404,0x04040404, /* QW3,CHA-B, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW4,CHA-B, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW5,CHA-B, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW6,CHA-B, DQ2-ODD */
+ 0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB,0xfBfBfBfB, /* QW7,CHA-B, DQ2-ODD */
+ 0x08080808,0x08080808,0x08080808,0x08080808, /* QW0,CHA-B, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7, /* QW1,CHA-B, DQ3-ODD */
+ 0x08080808,0x08080808,0x08080808,0x08080808, /* QW2,CHA-B, DQ3-ODD */
+ 0x08080808,0x08080808,0x08080808,0x08080808, /* QW3,CHA-B, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7, /* QW4,CHA-B, DQ3-ODD */
+ 0x08080808,0x08080808,0x08080808,0x08080808, /* QW5,CHA-B, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7, /* QW6,CHA-B, DQ3-ODD */
+ 0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7,0xF7F7F7F7, /* QW7,CHA-B, DQ3-ODD */
+ 0x10101010,0x10101010,0x10101010,0x10101010, /* QW0,CHA-B, DQ4-ODD */
+ 0x10101010,0x10101010,0x10101010,0x10101010, /* QW1,CHA-B, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF, /* QW2,CHA-B, DQ4-ODD */
+ 0x10101010,0x10101010,0x10101010,0x10101010, /* QW3,CHA-B, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF, /* QW4,CHA-B, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF, /* QW5,CHA-B, DQ4-ODD */
+ 0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF,0xeFeFeFeF, /* QW6,CHA-B, DQ4-ODD */
+ 0x10101010,0x10101010,0x10101010,0x10101010, /* QW7,CHA-B, DQ4-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW0,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW1,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW2,CHA-B, DQ5-ODD */
+ 0x20202020,0x20202020,0x20202020,0x20202020, /* QW3,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW4,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW5,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW6,CHA-B, DQ5-ODD */
+ 0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF,0xdFdFdFdF, /* QW7,CHA-B, DQ5-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW0,CHA-B, DQ6-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW1,CHA-B, DQ6-ODD */
+ 0x40404040,0x40404040,0x40404040,0x40404040, /* QW2,CHA-B, DQ6-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW3,CHA-B, DQ6-ODD */
+ 0x40404040,0x40404040,0x40404040,0x40404040, /* QW4,CHA-B, DQ6-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW5,CHA-B, DQ6-ODD */
+ 0x40404040,0x40404040,0x40404040,0x40404040, /* QW6,CHA-B, DQ6-ODD */
+ 0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf,0xBfBfBfBf, /* QW7,CHA-B, DQ6-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080, /* QW0,CHA-B, DQ7-ODD */
+ 0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F, /* QW1,CHA-B, DQ7-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080, /* QW2,CHA-B, DQ7-ODD */
+ 0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F, /* QW3,CHA-B, DQ7-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080, /* QW4,CHA-B, DQ7-ODD */
+ 0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F,0x7F7F7F7F, /* QW5,CHA-B, DQ7-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080, /* QW6,CHA-B, DQ7-ODD */
+ 0x80808080,0x80808080,0x80808080,0x80808080 /* QW7,CHA-B, DQ7-ODD */
+};
+
+const u8 Table_DQSRcvEn_Offset[] = {0x00,0x01,0x10,0x11};
+
+
+void TrainReceiverEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA, u8 Pass)
+{
+ u8 Node;
+ struct DCTStatStruc *pDCTstat;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+
+/*FIXME: needed? if (!pDCTstat->NodePresent)
+ break;
+*/
+ if (pDCTstat->DCTSysLimit) {
+ mct_TrainRcvrEn_D(pMCTstat, pDCTstat, Pass);
+ }
+ }
+}
+
+
+static void SetEccDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 ChipSel)
+{
+ u8 channel;
+ u8 direction;
+
+ for (channel = 0; channel < 2; channel++){
+ for (direction = 0; direction < 2; direction++) {
+ pDCTstat->Channel = channel; /* Channel A or B */
+ pDCTstat->Direction = direction; /* Read or write */
+ CalcEccDQSPos_D(pMCTstat, pDCTstat, pDCTstat->CH_EccDQSLike[channel], pDCTstat->CH_EccDQSScale[channel], ChipSel);
+ print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction==DQS_READDIR? " R dqs_delay":" W dqs_delay", pDCTstat->DQSDelay, 2);
+ pDCTstat->ByteLane = 8;
+ StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
+ mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
+ }
+ }
+}
+
+
+
+static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u16 like, u8 scale, u8 ChipSel)
+{
+ u8 DQSDelay0, DQSDelay1;
+ u16 DQSDelay;
+
+ pDCTstat->ByteLane = like & 0xff;
+ GetDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
+ DQSDelay0 = pDCTstat->DQSDelay;
+
+ pDCTstat->ByteLane = (like >> 8) & 0xff;
+ GetDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
+ DQSDelay1 = pDCTstat->DQSDelay;
+
+ if (DQSDelay0>DQSDelay1) {
+ DQSDelay = DQSDelay0 - DQSDelay1;
+ } else {
+ DQSDelay = DQSDelay1 - DQSDelay0;
+ }
+
+ DQSDelay = DQSDelay * (~scale);
+
+ DQSDelay += 0x80; // round it
+
+ DQSDelay >>= 8; // /256
+
+ if (DQSDelay0>DQSDelay1) {
+ DQSDelay = DQSDelay1 - DQSDelay;
+ } else {
+ DQSDelay += DQSDelay1;
+ }
+
+ pDCTstat->DQSDelay = (u8)DQSDelay;
+}
+
+
+static void TrainDQSRdWrPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start)
+{
+ u32 Errors;
+ u8 Channel, DQSWrDelay;
+ u8 _DisableDramECC = 0;
+ u32 PatternBuffer[292];
+ u8 _Wrap32Dis = 0, _SSE2 = 0;
+ u8 dqsWrDelay_end;
+
+ u32 addr;
+ u32 cr4;
+ u32 lo, hi;
+
+ print_debug_dqs("\nTrainDQSRdWrPos: Node_ID ", pDCTstat->Node_ID, 0);
+ cr4 = read_cr4();
+ if (cr4 & (1<<9)) {
+ _SSE2 = 1;
+ }
+ cr4 |= (1<<9); /* OSFXSR enable SSE2 */
+ write_cr4(cr4);
+
+ addr = HWCR;
+ _RDMSR(addr, &lo, &hi);
+ if (lo & (1<<17)) {
+ _Wrap32Dis = 1;
+ }
+ lo |= (1<<17); /* HWCR.wrap32dis */
+ _WRMSR(addr, lo, hi); /* allow 64-bit memory references in real mode */
+
+ /* Disable ECC correction of reads on the dram bus. */
+ _DisableDramECC = mct_DisableDimmEccEn_D(pMCTstat, pDCTstat);
+
+ SetupDqsPattern_D(pMCTstat, pDCTstat, PatternBuffer);
+
+ /* mct_BeforeTrainDQSRdWrPos_D */
+ dqsWrDelay_end = 0x20;
+
+ Errors = 0;
+ for (Channel = 0; Channel < 2; Channel++) {
+ print_debug_dqs("\tTrainDQSRdWrPos: 1 Channel ",Channel, 1);
+ pDCTstat->Channel = Channel;
+
+ if (pDCTstat->DIMMValidDCT[Channel] == 0) /* mct_BeforeTrainDQSRdWrPos_D */
+ continue;
+
+ for ( DQSWrDelay = 0; DQSWrDelay < dqsWrDelay_end; DQSWrDelay++) {
+ pDCTstat->DQSDelay = DQSWrDelay;
+ pDCTstat->Direction = DQS_WRITEDIR;
+ mct_SetDQSDelayAllCSR_D(pMCTstat, pDCTstat, cs_start);
+
+ print_debug_dqs("\t\tTrainDQSRdWrPos: 21 DQSWrDelay ", DQSWrDelay, 2);
+ TrainReadDQS_D(pMCTstat, pDCTstat, cs_start);
+
+ print_debug_dqs("\t\tTrainDQSRdWrPos: 22 TrainErrors ",pDCTstat->TrainErrors, 2);
+ if (pDCTstat->TrainErrors == 0) {
+ break;
+ }
+ Errors |= pDCTstat->TrainErrors;
+ }
+ if (DQSWrDelay < dqsWrDelay_end) {
+ Errors = 0;
+
+ print_debug_dqs("\tTrainDQSRdWrPos: 231 DQSWrDelay ", DQSWrDelay, 1);
+ TrainWriteDQS_D(pMCTstat, pDCTstat, cs_start);
+ }
+ print_debug_dqs("\tTrainDQSRdWrPos: 232 Errors ", Errors, 1);
+ pDCTstat->ErrStatus |= Errors;
+ }
+
+#if DQS_TRAIN_DEBUG > 0
+ {
+ u8 val;
+ u8 i;
+ u8 Channel, Receiver, Dir;
+ u8 *p;
+
+ for (Dir = 0; Dir < 2; Dir++) {
+ if (Dir == 0) {
+ print_debug("TrainDQSRdWrPos: CH_D_DIR_B_DQS WR:\n");
+ } else {
+ print_debug("TrainDQSRdWrPos: CH_D_DIR_B_DQS RD:\n");
+ }
+ for (Channel = 0; Channel < 2; Channel++) {
+ print_debug("Channel:"); print_debug_hex8(Channel); print_debug("\n");
+ for (Receiver = cs_start; Receiver < (cs_start + 2); Receiver += 2) {
+ print_debug("\t\tReceiver:"); print_debug_hex8(Receiver);
+ p = pDCTstat->CH_D_DIR_B_DQS[Channel][Receiver >> 1][Dir];
+ print_debug(": ");
+ for (i=0;i<8; i++) {
+ val = p[i];
+ print_debug_hex8(val);
+ print_debug(" ");
+ }
+ print_debug("\n");
+ }
+ }
+ }
+
+ }
+#endif
+
+ if (_DisableDramECC) {
+ mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC);
+ }
+ if (!_Wrap32Dis) {
+ addr = HWCR;
+ _RDMSR(addr, &lo, &hi);
+ lo &= ~(1<<17); /* restore HWCR.wrap32dis */
+ _WRMSR(addr, lo, hi);
+ }
+ if (!_SSE2){
+ cr4 = read_cr4();
+ cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
+ write_cr4(cr4);
+ }
+
+ print_tx("TrainDQSRdWrPos: Status ", pDCTstat->Status);
+ print_tx("TrainDQSRdWrPos: TrainErrors ", pDCTstat->TrainErrors);
+ print_tx("TrainDQSRdWrPos: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("TrainDQSRdWrPos: ErrCode ", pDCTstat->ErrCode);
+ print_t("TrainDQSRdWrPos: Done\n");
+}
+
+
+static void SetupDqsPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 *buffer)
+{
+ /* 1. Set the Pattern type (0 or 1) in DCTStatstruc.Pattern
+ * 2. Copy the pattern from ROM to Cache, aligning on 16 byte boundary
+ * 3. Set the ptr to Cacheable copy in DCTStatstruc.PtrPatternBufA
+ */
+
+ u32 *buf;
+ u16 i;
+
+ buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
+ if (pDCTstat->Status & (1<<SB_128bitmode)) {
+ pDCTstat->Pattern = 1; /* 18 cache lines, alternating qwords */
+ for (i=0; i<16*18; i++)
+ buf[i] = TestPatternJD1b_D[i];
+ } else {
+ pDCTstat->Pattern = 0; /* 9 cache lines, sequential qwords */
+ for (i=0; i<16*9; i++)
+ buf[i] = TestPatternJD1a_D[i];
+ }
+ pDCTstat->PtrPatternBufA = (u32)buf;
+}
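+
+/* Note on the alignment above: adding 0x10 and masking with 0xfffffff0 yields
+ * the first 16-byte-aligned address strictly above buffer. For example, a
+ * buffer at 0x12345678 gives (0x12345678 + 0x10) & 0xfffffff0 = 0x12345680.
+ */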
+
+
+static void TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start)
+{
+ u32 Errors;
+ u8 ChipSel, DQSDelay;
+ u8 RnkDlySeqPassMin,RnkDlySeqPassMax, RnkDlyFilterMin, RnkDlyFilterMax;
+ u8 LastTest;
+ u32 TestAddr;
+ u8 ByteLane;
+ u8 MutualCSPassW[64];
+ u8 BanksPresent;
+ u8 dqsDelay_end;
+ u8 tmp, valid;
+
+// print_tx("TrainDQSPos: Node_ID", pDCTstat->Node_ID);
+// print_tx("TrainDQSPos: Direction", pDCTstat->Direction);
+
+ /* MutualCSPassW: each byte represents a bitmap of pass/fail per
+	 * ByteLane. The index within MutualCSPassW is the delay value
+	 * that produced those results.
+ */
+
+
+ print_debug_dqs("\t\t\tTrainDQSPos begin ", 0, 3);
+
+ Errors = 0;
+ BanksPresent = 0;
+
+ if (pDCTstat->Direction == DQS_READDIR) {
+ dqsDelay_end = 64;
+ mct_AdjustDelayRange_D(pMCTstat, pDCTstat, &dqsDelay_end);
+ } else {
+ dqsDelay_end = 32;
+ }
+
+ /* Bitmapped status per delay setting, 0xff=All positions
+ * passing (1= PASS). Set the entire array.
+ */
+ for (DQSDelay=0; DQSDelay<64; DQSDelay++) {
+ MutualCSPassW[DQSDelay] = 0xFF;
+ }
+
+ for (ChipSel = cs_start; ChipSel < (cs_start + 2); ChipSel++) { /* logical register chipselects 0..7 */
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 11 ChipSel ", ChipSel, 4);
+
+ if (!mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, pDCTstat->Channel, ChipSel)) {
+ print_debug_dqs("\t\t\t\tmct_RcvrRankEnabled_D CS not enabled ", ChipSel, 4);
+ continue;
+ }
+
+		BanksPresent = 1;	/* flag that at least one bank is present */
+ TestAddr = mct_GetMCTSysAddr_D(pMCTstat, pDCTstat, pDCTstat->Channel, ChipSel, &valid);
+ if (!valid) {
+ print_debug_dqs("\t\t\t\tAddress not supported on current CS ", TestAddr, 4);
+ continue;
+ }
+
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 12 TestAddr ", TestAddr, 4);
+ SetUpperFSbase(TestAddr); /* fs:eax=far ptr to target */
+
+ if (pDCTstat->Direction==DQS_READDIR) {
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 13 for read ", 0, 4);
+ WriteDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr<<8);
+ }
+
+ for (DQSDelay = 0; DQSDelay < dqsDelay_end; DQSDelay++) {
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 141 DQSDelay ", DQSDelay, 5);
+ if (MutualCSPassW[DQSDelay] == 0)
+ continue; //skip current delay value if other chipselects have failed all 8 bytelanes
+ pDCTstat->DQSDelay = DQSDelay;
+ mct_SetDQSDelayAllCSR_D(pMCTstat, pDCTstat, cs_start);
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 142 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
+
+ if (pDCTstat->Direction == DQS_WRITEDIR) {
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 143 for write", 0, 5);
+ WriteDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr<<8);
+ }
+
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 144 Pattern ", pDCTstat->Pattern, 5);
+ ReadDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr<<8);
+// print_debug_dqs("\t\t\t\t\tTrainDQSPos: 145 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
+ tmp = CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8); /* 0=fail, 1=pass */
+
+ if (mct_checkFenceHoleAdjust_D(pMCTstat, pDCTstat, DQSDelay, ChipSel, &tmp)) {
+ goto skipLocMiddle;
+ }
+
+ MutualCSPassW[DQSDelay] &= tmp;
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 146 \tMutualCSPassW ", MutualCSPassW[DQSDelay], 5);
+
+ SetTargetWTIO_D(TestAddr);
+ FlushDQSTestPattern_D(pDCTstat, TestAddr<<8);
+ ResetTargetWTIO_D();
+ }
+
+ }
+
+ if (BanksPresent) {
+ u8 mask_pass = 0;
+ for (ByteLane = 0; ByteLane < 8; ByteLane++) {
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 31 ByteLane ",ByteLane, 4);
+ pDCTstat->ByteLane = ByteLane;
+ LastTest = DQS_FAIL; /* Analyze the results */
+ RnkDlySeqPassMin = 0;
+ RnkDlySeqPassMax = 0;
+ RnkDlyFilterMax = 0;
+ RnkDlyFilterMin = 0;
+ for (DQSDelay = 0; DQSDelay < dqsDelay_end; DQSDelay++) {
+ if (MutualCSPassW[DQSDelay] & (1 << ByteLane)) {
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 321 DQSDelay ", DQSDelay, 5);
+ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 322 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
+
+ RnkDlySeqPassMax = DQSDelay;
+ if (LastTest == DQS_FAIL) {
+ RnkDlySeqPassMin = DQSDelay; //start sequential run
+ }
+ if ((RnkDlySeqPassMax - RnkDlySeqPassMin)>(RnkDlyFilterMax-RnkDlyFilterMin)){
+ RnkDlyFilterMin = RnkDlySeqPassMin;
+ RnkDlyFilterMax = RnkDlySeqPassMax;
+ }
+ LastTest = DQS_PASS;
+ } else {
+ LastTest = DQS_FAIL;
+ }
+ }
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 33 RnkDlySeqPassMax ", RnkDlySeqPassMax, 4);
+ if (RnkDlySeqPassMax == 0) {
+ Errors |= 1<<SB_NODQSPOS; /* no passing window */
+ } else {
+ print_debug_dqs_pair("\t\t\t\tTrainDQSPos: 34 RnkDlyFilter: ", RnkDlyFilterMin, " ", RnkDlyFilterMax, 4);
+ if (((RnkDlyFilterMax - RnkDlyFilterMin) < MIN_DQS_WNDW)){
+ Errors |= 1 << SB_SMALLDQS;
+ } else {
+ u8 middle_dqs;
+ /* mctEngDQSwindow_Save_D Not required for arrays */
+ middle_dqs = MiddleDQS_D(RnkDlyFilterMin, RnkDlyFilterMax);
+ pDCTstat->DQSDelay = middle_dqs;
+ mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, cs_start); /* load the register with the value */
+ StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, cs_start); /* store the value into the data structure */
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 42 middle_dqs : ",middle_dqs, 4);
+ }
+ }
+ }
+ print_debug_dqs("\t\t\t\tTrainDQSPos: 41 mask_pass ",mask_pass, 3);
+ }
+skipLocMiddle:
+ pDCTstat->TrainErrors = Errors;
+
+ print_debug_dqs("\t\t\tTrainDQSPos: Errors ", Errors, 3);
+
+}
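+
+/* Worked example of the window search above (hypothetical pass/fail data):
+ * if a byte lane passes at delays 10-14 and again at 20-29, the filter keeps
+ * the longer run, so RnkDlyFilterMin/Max = 20/29, and MiddleDQS_D(20, 29)
+ * returns 20 + ((29 - 20 + 1) >> 1) = 25, which becomes the programmed DQS
+ * delay for that byte lane.
+ */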
+
+
+static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 ChipSel)
+{
+ /* Store the DQSDelay value, found during a training sweep, into the DCT
+ * status structure for this node
+ */
+
+
+	/* At 400, 533 and 667MHz, DIMM0/1/2/3 are supported; settings written
+	 * for DIMM0 are copied by hw to DIMM1/2/3, and settings for DIMM1 are
+	 * copied to DIMM3.
+	 * At 800MHz and above (+0x100 to the next dimm's register set),
+	 * Rev A/B only support DIMM0/1; Rev C supports DIMM0/1/2/3.
+	 */
+
+ /* FindDQSDatDimmVal_D is not required since we use an array */
+ u8 dn = 0;
+
+ if (pDCTstat->Status & (1 << SB_Over400MHz))
+ dn = ChipSel>>1; /* if odd or even logical DIMM */
+
+ pDCTstat->CH_D_DIR_B_DQS[pDCTstat->Channel][dn][pDCTstat->Direction][pDCTstat->ByteLane] =
+ pDCTstat->DQSDelay;
+}
+
+
+static void GetDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 ChipSel)
+{
+ u8 dn = 0;
+
+
+	/* At 400, 533 and 667MHz, DIMM0/1/2/3 are supported; settings written
+	 * for DIMM0 are copied by hw to DIMM1/2/3, and settings for DIMM1 are
+	 * copied to DIMM3.
+	 * At 800MHz and above (+0x100 to the next dimm's register set),
+	 * Rev A/B only support DIMM0/1; Rev C supports DIMM0/1/2/3.
+	 */
+
+ /* FindDQSDatDimmVal_D is not required since we use an array */
+ if (pDCTstat->Status & (1<<SB_Over400MHz))
+ dn = ChipSel >> 1; /*if odd or even logical DIMM */
+
+ pDCTstat->DQSDelay =
+ pDCTstat->CH_D_DIR_B_DQS[pDCTstat->Channel][dn][pDCTstat->Direction][pDCTstat->ByteLane];
+}
+
+
+/* FindDQSDatDimmVal_D is not required since we use an array */
+
+
+static u8 MiddleDQS_D(u8 min, u8 max)
+{
+ u8 size;
+ size = max-min;
+ if (size % 2)
+ size++; // round up if the size isn't even.
+ return ( min + (size >> 1));
+}
+
+
+static void TrainReadDQS_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start)
+{
+ print_debug_dqs("\t\tTrainReadPos ", 0, 2);
+ pDCTstat->Direction = DQS_READDIR;
+ TrainDQSPos_D(pMCTstat, pDCTstat, cs_start);
+}
+
+
+static void TrainWriteDQS_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start)
+{
+ pDCTstat->Direction = DQS_WRITEDIR;
+ print_debug_dqs("\t\tTrainWritePos", 0, 2);
+ TrainDQSPos_D(pMCTstat, pDCTstat, cs_start);
+}
+
+
+static void proc_IOCLFLUSH_D(u32 addr_hi)
+{
+ SetTargetWTIO_D(addr_hi);
+ proc_CLFLUSH(addr_hi);
+ ResetTargetWTIO_D();
+}
+
+
+static u8 ChipSelPresent_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 ChipSel)
+{
+ u32 val;
+ u32 reg;
+ u32 dev = pDCTstat->dev_dct;
+ u32 reg_off;
+ u8 ret = 0;
+
+ if (!pDCTstat->GangedMode) {
+ reg_off = 0x100 * Channel;
+ } else {
+ reg_off = 0;
+ }
+
+ if (ChipSel < MAX_CS_SUPPORTED){
+ reg = 0x40 + (ChipSel << 2) + reg_off;
+ val = Get_NB32(dev, reg);
+ if (val & ( 1 << 0))
+ ret = 1;
+ }
+
+ return ret;
+}
+
+
+/* proc_CLFLUSH_D located in mct_gcc.h */
+
+
+static void WriteDQSTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo)
+{
+ /* Write a pattern of 72 bit times (per DQ), to test dram functionality.
+ * The pattern is a stress pattern which exercises both ISI and
+ * crosstalk. The number of cache lines to fill is dependent on DCT
+ * width mode and burstlength.
+ * Mode BL Lines Pattern no.
+ * ----+---+-------------------
+ * 64 4 9 0
+ * 64 8 9 0
+ * 64M 4 9 0
+ * 64M 8 9 0
+ * 128 4 18 1
+ * 128 8 N/A -
+ */
+
+ if (pDCTstat->Pattern == 0)
+ WriteL9TestPattern_D(pDCTstat, TestAddr_lo);
+ else
+ WriteL18TestPattern_D(pDCTstat, TestAddr_lo);
+}
+
+
+static void WriteL18TestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo)
+{
+ u8 *buf;
+
+ buf = (u8 *)pDCTstat->PtrPatternBufA;
+ WriteLNTestPattern(TestAddr_lo, buf, 18);
+
+}
+
+
+static void WriteL9TestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo)
+{
+ u8 *buf;
+
+ buf = (u8 *)pDCTstat->PtrPatternBufA;
+ WriteLNTestPattern(TestAddr_lo, buf, 9);
+}
+
+
+
+static u8 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 addr_lo)
+{
+ /* Compare a pattern of 72 bit times (per DQ), to test dram functionality.
+ * The pattern is a stress pattern which exercises both ISI and
+ * crosstalk. The number of cache lines to fill is dependent on DCT
+ * width mode and burstlength.
+ * Mode BL Lines Pattern no.
+ * ----+---+-------------------
+ * 64 4 9 0
+ * 64 8 9 0
+ * 64M 4 9 0
+ * 64M 8 9 0
+ * 128 4 18 1
+ * 128 8 N/A -
+ */
+
+ u32 *test_buf;
+ u8 bitmap;
+ u8 bytelane;
+ u8 i;
+ u32 value;
+ u8 j;
+ u32 value_test;
+ u8 pattern, channel;
+
+ pattern = pDCTstat->Pattern;
+ channel = pDCTstat->Channel;
+ test_buf = (u32 *)pDCTstat->PtrPatternBufA;
+
+ if (pattern && channel) {
+ addr_lo += 8; //second channel
+ test_buf+= 2;
+ }
+
+ bytelane = 0;
+ bitmap = 0xFF;
+ for (i=0; i < (9 * 64 / 4); i++) { /* /4 due to next loop */
+ value = read32_fs(addr_lo);
+ value_test = *test_buf;
+
+ print_debug_dqs_pair("\t\t\t\t\t\ttest_buf = ", (u32)test_buf, " value = ", value_test, 7);
+ print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ", addr_lo, " value = ", value, 7);
+
+ for (j = 0; j < (4 * 8); j += 8) {
+ if (((value >> j) & 0xff) != ((value_test >> j) & 0xff)) {
+ bitmap &= ~(1 << bytelane);
+ }
+
+ bytelane++;
+ bytelane &= 0x7;
+ }
+
+ print_debug_dqs("\t\t\t\t\t\tbitmap = ", bitmap, 7);
+
+ if (!bitmap)
+ break;
+
+ if (bytelane == 0){
+ if (pattern == 1) { //dual channel
+ addr_lo += 8; //skip over other channel's data
+ test_buf += 2;
+ }
+ }
+ addr_lo += 4;
+ test_buf += 1;
+ }
+
+ return bitmap;
+}
+
+
+static void FlushDQSTestPattern_D(struct DCTStatStruc *pDCTstat,
+ u32 addr_lo)
+{
+ /* Flush functions in mct_gcc.h */
+ if (pDCTstat->Pattern == 0){
+ FlushDQSTestPattern_L9(addr_lo);
+ } else {
+ FlushDQSTestPattern_L18(addr_lo);
+ }
+}
+
+static void SetTargetWTIO_D(u32 TestAddr)
+{
+ u32 lo, hi;
+ hi = TestAddr >> 24;
+ lo = TestAddr << 8;
+ _WRMSR(0xC0010016, lo, hi); /* IORR0 Base */
+ hi = 0xFF;
+ lo = 0xFC000800; /* 64MB Mask */
+ _WRMSR(0xC0010017, lo, hi); /* IORR0 Mask */
+}
+
+
+static void ResetTargetWTIO_D(void)
+{
+ u32 lo, hi;
+
+ hi = 0;
+ lo = 0;
+ _WRMSR(0xc0010017, lo, hi); // IORR0 Mask
+}
+
+
+static void ReadDQSTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 TestAddr_lo)
+{
+ /* Read a pattern of 72 bit times (per DQ), to test dram functionality.
+ * The pattern is a stress pattern which exercises both ISI and
+ * crosstalk. The number of cache lines to fill is dependent on DCT
+ * width mode and burstlength.
+ * Mode BL Lines Pattern no.
+ * ----+---+-------------------
+ * 64 4 9 0
+ * 64 8 9 0
+ * 64M 4 9 0
+ * 64M 8 9 0
+ * 128 4 18 1
+ * 128 8 N/A -
+ */
+ if (pDCTstat->Pattern == 0)
+ ReadL9TestPattern(TestAddr_lo);
+ else
+ ReadL18TestPattern(TestAddr_lo);
+ _MFENCE;
+}
+
+
+u32 SetUpperFSbase(u32 addr_hi)
+{
+ /* Set the upper 32-bits of the Base address, 4GB aligned) for the
+ * FS selector.
+ */
+
+ u32 lo, hi;
+ u32 addr;
+ lo = 0;
+ hi = addr_hi>>24;
+ addr = FS_Base;
+ _WRMSR(addr, lo, hi);
+ return addr_hi<<8;
+}
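+
+/* Example of the right-justified [39:8] address scaling used throughout this
+ * file: an addr_hi of 0x01234567 stands for physical 0x1_23456700. The
+ * function above writes 0x01 (addr_hi >> 24) into the upper dword of FS_Base
+ * and returns 0x23456700 (addr_hi << 8) as the 32-bit offset, so fs:offset
+ * addresses the full 40-bit location.
+ */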
+
+
+void ResetDCTWrPtr_D(u32 dev, u32 index_reg, u32 index)
+{
+ u32 val;
+
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ Set_NB32_index_wait(dev, index_reg, index, val);
+}
+
+
+/* mctEngDQSwindow_Save_D not required with arrays */
+
+
+void mct_TrainDQSPos_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u8 ChipSel;
+ struct DCTStatStruc *pDCTstat;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ pDCTstat = pDCTstatA + Node;
+ if (pDCTstat->DCTSysLimit) {
+ /* when DCT speed >= 400MHz, we only support 2 DIMMs
+			 * and we have two sets of registers for DIMM0 and DIMM1, so
+			 * here we must train DQSRd/WrPos for DIMM0 and DIMM1
+ */
+ if (pDCTstat->Speed >= 4) {
+ pDCTstat->Status |= (1 << SB_Over400MHz);
+ }
+ for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) {
+ TrainDQSRdWrPos_D(pMCTstat, pDCTstat, ChipSel);
+ SetEccDQSRdWrPos_D(pMCTstat, pDCTstat, ChipSel);
+ }
+ }
+ }
+}
+
+
+/* mct_BeforeTrainDQSRdWrPos_D
+ * Function is inline.
+ */
+
+u8 mct_DisableDimmEccEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u8 _DisableDramECC = 0;
+ u32 val;
+ u32 reg;
+ u32 dev;
+
+ /*Disable ECC correction of reads on the dram bus. */
+
+ dev = pDCTstat->dev_dct;
+ reg = 0x90;
+ val = Get_NB32(dev, reg);
+ if (val & (1<<DimmEcEn)) {
+ _DisableDramECC |= 0x01;
+ val &= ~(1<<DimmEcEn);
+ Set_NB32(dev, reg, val);
+ }
+ if (!pDCTstat->GangedMode) {
+ reg = 0x190;
+ val = Get_NB32(dev, reg);
+ if (val & (1<<DimmEcEn)) {
+ _DisableDramECC |= 0x02;
+ val &= ~(1<<DimmEcEn);
+ Set_NB32(dev, reg, val);
+ }
+ }
+ return _DisableDramECC;
+}
+
+
+
+void mct_EnableDimmEccEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 _DisableDramECC)
+{
+
+ u32 val;
+ u32 reg;
+ u32 dev;
+
+ /* Enable ECC correction if it was previously disabled */
+
+ dev = pDCTstat->dev_dct;
+
+ if ((_DisableDramECC & 0x01) == 0x01) {
+ reg = 0x90;
+ val = Get_NB32(dev, reg);
+ val |= (1<<DimmEcEn);
+ Set_NB32(dev, reg, val);
+ }
+ if ((_DisableDramECC & 0x02) == 0x02) {
+ reg = 0x190;
+ val = Get_NB32(dev, reg);
+ val |= (1<<DimmEcEn);
+ Set_NB32(dev, reg, val);
+ }
+}
+
+
+static void mct_SetDQSDelayCSR_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 ChipSel)
+{
+ u8 ByteLane;
+ u32 val;
+ u32 index_reg = 0x98 + 0x100 * pDCTstat->Channel;
+ u8 shift;
+ u32 dqs_delay = (u32)pDCTstat->DQSDelay;
+ u32 dev = pDCTstat->dev_dct;
+ u32 index;
+
+ ByteLane = pDCTstat->ByteLane;
+
+ /* Channel is offset */
+ if (ByteLane < 4) {
+ index = 1;
+ } else if (ByteLane <8) {
+ index = 2;
+ } else {
+ index = 3;
+ }
+
+ if (pDCTstat->Direction == DQS_READDIR) {
+ index += 4;
+ }
+
+ /* get the proper register index */
+ shift = ByteLane%4;
+ shift <<= 3; /* get bit position of bytelane, 8 bit */
+
+ if (pDCTstat->Status & (1 << SB_Over400MHz)) {
+ index += (ChipSel >> 1) * 0x100; /* if logical DIMM1/DIMM3 */
+ }
+
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val &= ~(0x7f << shift);
+ val |= (dqs_delay << shift);
+ Set_NB32_index_wait(dev, index_reg, index, val);
+}
+
+
+static void mct_SetDQSDelayAllCSR_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 cs_start)
+{
+ u8 ByteLane;
+ u8 ChipSel = cs_start;
+
+
+ for (ChipSel = cs_start; ChipSel < (cs_start + 2); ChipSel++) {
+ if ( mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, pDCTstat->Channel, ChipSel)) {
+ for (ByteLane = 0; ByteLane < 8; ByteLane++) {
+ pDCTstat->ByteLane = ByteLane;
+ mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
+ }
+ }
+ }
+}
+
+
+u8 mct_RcvrRankEnabled_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 ChipSel)
+{
+ u8 ret;
+
+ ret = ChipSelPresent_D(pMCTstat, pDCTstat, Channel, ChipSel);
+ return ret;
+}
+
+
+u32 mct_GetRcvrSysAddr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 channel, u8 receiver, u8 *valid)
+{
+ return mct_GetMCTSysAddr_D(pMCTstat, pDCTstat, channel, receiver, valid);
+}
+
+
+u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 receiver, u8 *valid)
+{
+ u32 val;
+ u32 reg_off = 0;
+ u32 reg;
+ u32 dword;
+ u32 dev = pDCTstat->dev_dct;
+
+ *valid = 0;
+
+
+ if (!pDCTstat->GangedMode) { // FIXME: not used.
+ reg_off = 0x100 * Channel;
+ }
+
+ /* get the local base addr of the chipselect */
+ reg = 0x40 + (receiver << 2);
+ val = Get_NB32(dev, reg);
+
+ val &= ~0x0F;
+
+ /* unganged mode DCT0+DCT1, sys addr of DCT1=node
+ * base+DctSelBaseAddr+local ca base*/
+ if ((Channel) && (pDCTstat->GangedMode == 0) && ( pDCTstat->DIMMValidDCT[0] > 0)) {
+ reg = 0x110;
+ dword = Get_NB32(dev, reg);
+ dword &= 0xfffff800;
+ dword <<= 8; /* scale [47:27] of F2x110[31:11] to [39:8]*/
+ val += dword;
+
+ /* if DCTSelBaseAddr < Hole, and eax > HoleBase, then add Hole size to test address */
+ if ((val >= pDCTstat->DCTHoleBase) && (pDCTstat->DCTHoleBase > dword)) {
+ dword = (~(pDCTstat->DCTHoleBase >> (24 - 8)) + 1) & 0xFF;
+ dword <<= (24 - 8);
+ val += dword;
+ }
+ } else {
+ /* sys addr=node base+local cs base */
+ val += pDCTstat->DCTSysBase;
+
+ /* New stuff */
+ if (pDCTstat->DCTHoleBase && (val >= pDCTstat->DCTHoleBase)) {
+ val -= pDCTstat->DCTSysBase;
+ dword = Get_NB32(pDCTstat->dev_map, 0xF0); /* get Hole Offset */
+ val += (dword & 0x0000ff00) << (24-8-8);
+ }
+ }
+
+ /* New stuff */
+ val += ((1 << 21) >> 8); /* Add 2MB offset to avoid compat area */
+ if (val >= MCT_TRNG_KEEPOUT_START) {
+ while(val < MCT_TRNG_KEEPOUT_END)
+ val += (1 << (15-8)); /* add 32K */
+ }
+
+ /* HW remap disabled? */
+ if (!(pDCTstat->Status & (1 << SB_HWHole))) {
+ if (!(pDCTstat->Status & (1 << SB_SWNodeHole))) {
+ /* SW memhole disabled */
+ u32 lo, hi;
+ _RDMSR(TOP_MEM, &lo, &hi);
+ lo >>= 8;
+ if ((val >= lo) && (val < _4GB_RJ8)) {
+ val = 0;
+ *valid = 0;
+ goto exitGetAddr;
+ } else {
+ *valid = 1;
+ goto exitGetAddrWNoError;
+ }
+ } else {
+ *valid = 1;
+ goto exitGetAddrWNoError;
+ }
+ } else {
+ *valid = 1;
+ goto exitGetAddrWNoError;
+ }
+
+exitGetAddrWNoError:
+
+ /* Skip if Address is in UMA region */
+ dword = pMCTstat->Sub4GCacheTop;
+ dword >>= 8;
+ if (dword != 0) {
+ if ((val >= dword) && (val < _4GB_RJ8)) {
+ val = 0;
+ *valid = 0;
+ } else {
+ *valid = 1;
+ }
+ }
+
+exitGetAddr:
+ return val;
+}
+
+
+void mct_Write1LTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 TestAddr, u8 pattern)
+{
+
+ u8 *buf;
+
+ /* Issue the stream of writes. When F2x11C[MctWrLimit] is reached
+ * (or when F2x11C[FlushWr] is set again), all the writes are written
+ * to DRAM.
+ */
+
+ SetUpperFSbase(TestAddr);
+
+ if (pattern)
+ buf = (u8 *)pDCTstat->PtrPatternBufB;
+ else
+ buf = (u8 *)pDCTstat->PtrPatternBufA;
+
+ WriteLNTestPattern(TestAddr << 8, buf, 1);
+}
+
+
+void mct_Read1LTestPattern_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 addr)
+{
+ u32 value;
+
+ /* BIOS issues the remaining (Ntrain - 2) reads after checking that
+ * F2x11C[PrefDramTrainMode] is cleared. These reads must be to
+ * consecutive cache lines (i.e., 64 bytes apart) and must not cross
+ * a naturally aligned 4KB boundary. These reads hit the prefetches and
+ * read the data from the prefetch buffer.
+ */
+
+ /* get data from DIMM */
+ SetUpperFSbase(addr);
+
+ /* 1st move causes read fill (to exclusive or shared)*/
+ value = read32_fs(addr<<8);
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctecc_d.c b/src/northbridge/amd/amdmct/mct/mctecc_d.c
new file mode 100644
index 0000000000..b48c1f5417
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctecc_d.c
@@ -0,0 +1,296 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "mct_d.h"
+
+static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA);
+static u32 GetScrubAddr_D(u32 Node);
+static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat);
+
+
+/* Initialize ECC modes of Integrated Dram+Memory Controllers of a network of
+ * Hammer processors. Use Dram background scrubber to fast initialize ECC bits
+ * of all dram.
+ *
+ * Notes:
+ *
+ * Order that items are set:
+ * 1. eccen bit in NB
+ * 2. Scrub Base
+ * 3. Temp Node Base
+ * 4. Temp Node Limit
+ * 5. Redir bit in NB
+ * 6. Scrub CTL
+ *
+ * Conditions for setting background scrubber.
+ * 1. node is present
+ * 2. node has dram functioning (WE=RE=1)
+ * 3. all eccdimms (or bit 17 of offset 90,fn 2)
+ * 4. no chip-select gap exists
+ *
+ * The dram background scrubber is used under very controlled circumstances to
+ * initialize all the ECC bits on the DIMMs of the entire dram address map
+ * (including hidden or lost dram and dram above 4GB). We will turn the scrub
+ * rate up to maximum, which should clear 4GB of dram in about 2.7 seconds.
+ * We will activate the scrubbers of all nodes with ecc dram and let them run in
+ * parallel, thereby reducing even further the time required to condition dram.
+ * Finally, we will go through each node and either disable background scrubber,
+ * or set the scrub rate to the user setup specified rate.
+ *
+ * To allow the NB to scrub, we need to wait a time period long enough to
+ * guarantee that the NB scrubs the entire dram on its node. To do this, we
+ * simply sample the scrub ADDR once, for an initial value, then we sample
+ * and poll until the polled value of scrub ADDR has wrapped around at least
+ * once: Scrub ADDRi+1 < Scrub ADDRi. Since we let all Nodes run in parallel,
+ * we need to guarantee that all nodes have wrapped. To do this efficiently,
+ * we need only to sample one of the nodes: the node with the largest amount
+ * of dram populated is the one which will take the longest amount
+ * of time (the scrub rate is set to max, the same rate, on all nodes). So,
+ * during setup of scrub Base, we determine how much memory and which node has
+ * the largest memory installed.
+ *
+ * Scrubbing should not ordinarily be enabled on a Node with a chip-select gap
+ * (aka SW memhole, cs hoisting, etc.). To init ECC memory on this node, the
+ * scrubber is used in two steps. First, the Dram Limit for the node is adjusted
+ * down to the bottom of the gap, and that ECC dram is initialized. Second, the
+ * original Limit is restored, the Scrub base is set to 4GB, and the scrubber is
+ * allowed to run until the Scrub Addr wraps around to zero.
+ */
+u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u8 AllECC;
+ u16 OB_NBECC;
+ u32 curBase;
+ u16 OB_ECCRedir;
+ u32 LDramECC;
+ u32 OF_ScrubCTL;
+ u16 OB_ChipKill;
+ u8 MemClrECC;
+
+ u32 dev;
+ u32 reg;
+ u32 val;
+
+ mctHookBeforeECC();
+
+ /* Construct these booleans, based on setup options, for easy handling
+ later in this procedure */
+ OB_NBECC = mctGet_NVbits(NV_NBECC); /* MCA ECC (MCE) enable bit */
+
+ OB_ECCRedir = mctGet_NVbits(NV_ECCRedir); /* ECC Redirection */
+
+ OB_ChipKill = mctGet_NVbits(NV_ChipKill); /* ECC Chip-kill mode */
+
+ OF_ScrubCTL = 0; /* Scrub CTL for Dcache, L2, and dram */
+ val = mctGet_NVbits(NV_DCBKScrub);
+ mct_AdjustScrub_D(pDCTstatA, val);
+ OF_ScrubCTL |= val << 16;
+ val = mctGet_NVbits(NV_L2BKScrub);
+ OF_ScrubCTL |= val << 8;
+
+ val = mctGet_NVbits(NV_DramBKScrub);
+ OF_ScrubCTL |= val;
+
+ AllECC = 1;
+ MemClrECC = 0;
+ print_t(" ECCInit 0 \n");
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ LDramECC = 0;
+ if (NodePresent_D(Node)) { /*If Node is present */
+ dev = pDCTstat->dev_map;
+ reg = 0x40+(Node << 3); /* Dram Base Node 0 + index */
+ val = Get_NB32(dev, reg);
+
+ /* WE/RE is checked */
+ if((val & 3)==3) { /* Node has dram populated */
+ /* Negate 'all nodes/dimms ECC' flag if non ecc
+ memory populated */
+ if( pDCTstat->Status & (1<<SB_ECCDIMMs)) {
+ LDramECC = isDramECCEn_D(pDCTstat);
+ if(pDCTstat->ErrCode != SC_RunningOK) {
+ pDCTstat->Status &= ~(1 << SB_ECCDIMMs);
+ if (OB_NBECC) {
+ pDCTstat->ErrStatus |= (1 << SB_DramECCDis);
+ }
+ AllECC = 0;
+ LDramECC =0;
+ }
+ } else {
+ AllECC = 0;
+ }
+ if(LDramECC) { /* if ECC is enabled on this dram */
+ if (OB_NBECC) {
+ mct_EnableDatIntlv_D(pMCTstat, pDCTstat);
+ dev = pDCTstat->dev_nbmisc;
+ reg =0x44; /* MCA NB Configuration */
+ val = Get_NB32(dev, reg);
+ val |= 1 << 22; /* EccEn */
+ Set_NB32(dev, reg, val);
+ DCTMemClr_Init_D(pMCTstat, pDCTstat);
+ MemClrECC = 1;
+ print_tx(" ECC enabled on node: ", Node);
+ }
+ } /* this node has ECC enabled dram */
+ } else {
+ LDramECC = 0;
+ } /* Node has Dram */
+
+ if (MemClrECC) {
+ MCTMemClrSync_D(pMCTstat, pDCTstatA);
+ }
+ } /* if Node present */
+ }
+ print_t(" ECCInit 1 \n");
+
+ if(AllECC)
+ pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
+ else
+ pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs);
+
+ print_t(" ECCInit 2 \n");
+
+ /* Program the Dram BKScrub CTL to the proper (user selected) value.*/
+ /* Reset MC4_STS. */
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ LDramECC = 0;
+ if (NodePresent_D(Node)) { /* If Node is present */
+ reg = 0x40+(Node<<3); /* Dram Base Node 0 + index */
+ val = Get_NB32(pDCTstat->dev_map, reg);
+ curBase = val & 0xffff0000;
+			/* WE/RE is checked because the memory config may have changed */
+ if((val & 3)==3) { /* Node has dram populated */
+ if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */
+ dev = pDCTstat->dev_nbmisc;
+ val = curBase << 8;
+ if(OB_ECCRedir) {
+ val |= (1<<0); /* enable redirection */
+ }
+ Set_NB32(dev, 0x5C, val); /* Dram Scrub Addr Low */
+ val = curBase>>24;
+ Set_NB32(dev, 0x60, val); /* Dram Scrub Addr High */
+ Set_NB32(dev, 0x58, OF_ScrubCTL); /*Scrub Control */ /*set dram background scrubbing to setup value */
+ } /* this node has ECC enabled dram */
+ } /*Node has Dram */
+ } /*if Node present */
+ }
+ print_t(" ECCInit 3 \n");
+
+ if(mctGet_NVbits(NV_SyncOnUnEccEn))
+ setSyncOnUnEccEn_D(pMCTstat, pDCTstatA);
+
+ mctHookAfterECC();
+ return MemClrECC;
+}
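+
+/* Example of the OF_ScrubCTL assembly above (hypothetical setup values): the
+ * Dcache scrub rate is shifted to bit 16, the L2 rate to bit 8 and the dram
+ * rate occupies the low bits, so rates of 0x08, 0x0A and 0x12 respectively
+ * produce (0x08 << 16) | (0x0A << 8) | 0x12 = 0x00080A12, which is the value
+ * written to the Scrub Control register (0x58) for each ECC-enabled node.
+ */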
+
+
+static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u32 Node;
+ u32 reg;
+ u32 dev;
+ u32 val;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ if (NodePresent_D(Node)) { /* If Node is present*/
+ reg = 0x40+(Node<<3); /* Dram Base Node 0 + index*/
+ val = Get_NB32(pDCTstat->dev_map, reg);
+				/* WE/RE is checked because the memory config may have changed */
+ if((val & 3)==3) { /* Node has dram populated*/
+ if( isDramECCEn_D(pDCTstat)) {
+ /*if ECC is enabled on this dram*/
+ dev = pDCTstat->dev_nbmisc;
+ reg = 0x44; /* MCA NB Configuration*/
+ val = Get_NB32(dev, reg);
+ val |= (1<<SyncOnUcEccEn);
+ Set_NB32(dev, reg, val);
+ }
+ } /* Node has Dram*/
+ } /* if Node present*/
+ }
+}
+
+
+static u32 GetScrubAddr_D(u32 Node)
+{
+ /* Get the current 40-bit Scrub ADDR address, scaled to 32-bits,
+ * of the specified Node.
+ */
+
+ u32 reg;
+ u32 regx;
+ u32 lo, hi;
+ u32 val;
+ u32 dev = PA_NBMISC(Node);
+
+
+ reg = 0x60; /* Scrub Addr High */
+ hi = Get_NB32(dev, reg);
+
+ regx = 0x5C; /* Scrub Addr Low */
+ lo = Get_NB32(dev, regx);
+ /* Scrub Addr High again, detect 32-bit wrap */
+ val = Get_NB32(dev, reg);
+ if(val != hi) {
+		hi = val;	/* Scrub Addr Low again, if wrap occurred */
+ lo = Get_NB32(dev, regx);
+ }
+
+ val = hi << 24;
+ val |= lo >> 8;
+
+ return val; /* ScrubAddr[39:8] */
+}
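
GetScrubAddr_D() above re-reads the Scrub Addr High register to guard against the low register wrapping between the two reads, then packs the result as (hi << 24) | (lo >> 8). A minimal standalone sketch of that packing, with hypothetical register values and the field layout inferred from the shifts used in the function:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a scrub address the way GetScrubAddr_D() does: the low register
     * supplies address bits [31:8] and the high register supplies bits
     * [39:32], giving ScrubAddr[39:8] in one 32-bit value. */
    static uint32_t pack_scrub_addr(uint32_t lo, uint32_t hi)
    {
        return (hi << 24) | (lo >> 8);
    }

    int main(void)
    {
        uint32_t lo = 0x23456700;   /* hypothetical F3x5C contents */
        uint32_t hi = 0x00000001;   /* hypothetical F3x60 contents */
        printf("ScrubAddr[39:8] = 0x%08x\n", pack_scrub_addr(lo, hi)); /* 0x01234567 */
        return 0;
    }
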
+
+
+static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
+{
+ u32 reg;
+ u32 val;
+ u8 i;
+ u32 dev = pDCTstat->dev_dct;
+ u8 ch_end;
+ u8 isDimmECCEn = 0;
+
+ if(pDCTstat->GangedMode) {
+ ch_end = 1;
+ } else {
+ ch_end = 2;
+ }
+ for(i=0; i<ch_end; i++) {
+ if(pDCTstat->DIMMValidDCT[i] > 0){
+ reg = 0x90 + i * 0x100; /* Dram Config Low */
+ val = Get_NB32(dev, reg);
+ if(val & (1<<DimmEcEn)) {
+ /* set local flag 'dram ecc capable' */
+ isDimmECCEn = 1;
+ break;
+ }
+ }
+ }
+ return isDimmECCEn;
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctgr.c b/src/northbridge/amd/amdmct/mct/mctgr.c
new file mode 100644
index 0000000000..ecf5847db3
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctgr.c
@@ -0,0 +1,88 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static const u8 Tab_GRCLKDis[] = { 8,0,8,8,0,0,8,0 };
+
+
+u32 mct_AdjustMemClkDis_GR(struct DCTStatStruc *pDCTstat, u32 dct,
+ u32 DramTimingLo)
+{
+	/* Greyhound format -> Griffin format */
+ u32 NewDramTimingLo;
+ u32 dev = pDCTstat->dev_dct;
+ u32 reg;
+ u32 reg_off = 0x100 * dct;
+ u32 val;
+ int i;
+
+ /* Dram Timing Low (owns Clock Enable bits) */
+ NewDramTimingLo = Get_NB32(dev, 0x88 + reg_off);
+ if(mctGet_NVbits(NV_AllMemClks)==0) {
+ /*Special Jedec SPD diagnostic bit - "enable all clocks"*/
+ if(!(pDCTstat->Status & (1<<SB_DiagClks))) {
+ for(i=0; i<MAX_DIMMS_SUPPORTED; i++) {
+ val = Tab_GRCLKDis[i];
+ if(val<8) {
+ if(!(pDCTstat->DIMMValidDCT[dct] & (1<<val))) {
+ /* disable memclk */
+ NewDramTimingLo |= (1<<(i+1));
+ }
+ }
+ }
+ }
+ }
+ DramTimingLo &= ~(0xff<<24);
+ DramTimingLo |= NewDramTimingLo & (0xff<<24);
+ DramTimingLo &= (0x4d<<24); /* FIXME - enable all MemClks for now */
+
+ return DramTimingLo;
+}
+
+
+u32 mct_AdjustDramConfigLo_GR(struct DCTStatStruc *pDCTstat, u32 dct, u32 val)
+{
+	/* Greyhound format -> Griffin format */
+ /*FIXME - BurstLength32 must be 0 when F3x44[DramEccEn]=1. */
+/*
+ ; mov cx,PA_NBMISC+44h ;MCA NB Configuration
+ ; call Get_NB32n_D
+ ; bt eax,22 ;EccEn
+ ; .if(CARRY?)
+ ; btr eax,BurstLength32
+ ; .endif
+*/
+ return val;
+}
+
+
+void mct_AdjustMemHoist_GR(struct DCTStatStruc *pDCTstat, u32 base, u32 HoleSize)
+{
+ u32 val;
+ if(base >= pDCTstat->DCTHoleBase) {
+ u32 dev = pDCTstat->dev_dct;
+ base += HoleSize;
+ base >>= 27 - 8;
+ val = Get_NB32(dev, 0x110);
+ val &= ~(0xfff<<11);
+ val |= (base & 0xfff)<<11;
+ Set_NB32(dev, 0x110, val);
+ }
+}
diff --git a/src/northbridge/amd/amdmct/mct/mcthdi.c b/src/northbridge/amd/amdmct/mct/mcthdi.c
new file mode 100644
index 0000000000..ee347502bf
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mcthdi.c
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+void mct_DramInit_Hw_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u32 val;
+ u32 reg;
+ u32 dev = pDCTstat->dev_dct;
+
+	/* flag for selecting HW/SW DRAM init: use HW DRAM init */
+ reg = 0x90 + 0x100 * dct; /*DRAM Configuration Low */
+ val = Get_NB32(dev, reg);
+ val |= (1<<InitDram);
+ Set_NB32(dev, reg, val);
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctmtr_d.c b/src/northbridge/amd/amdmct/mct/mctmtr_d.c
new file mode 100644
index 0000000000..d7cb3649e5
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctmtr_d.c
@@ -0,0 +1,213 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "mct_d.h"
+
+static void SetMTRRrangeWB_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr);
+static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType);
+
+void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ /* BSP only. Set the fixed MTRRs for common legacy ranges.
+ * Set TOP_MEM and TOM2.
+ * Set some variable MTRRs with WB Uncacheable type.
+ */
+
+ u32 Bottom32bIO, Bottom40bIO, Cache32bTOP;
+ u32 val;
+ u32 addr;
+ u32 lo, hi;
+
+ /* Set temporary top of memory from Node structure data.
+	 * Adjust temp top of memory down to accommodate 32-bit IO space.
+ * Bottom40bIO=top of memory, right justified 8 bits
+ * (defines dram versus IO space type)
+ * Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+ * (defines dram versus IO space type)
+ * Cache32bTOP=sub 4GB top of WB cacheable memory,
+ * right justified 8 bits
+ */
+
+ val = mctGet_NVbits(NV_BottomIO);
+ if(val == 0)
+ val++;
+
+ Bottom32bIO = val << (24-8);
+
+ val = pMCTstat->SysLimit + 1;
+ if(val <= _4GB_RJ8) {
+ Bottom40bIO = 0;
+ if(Bottom32bIO >= val)
+ Bottom32bIO = val;
+ } else {
+ Bottom40bIO = val;
+ }
+
+ val = mctGet_NVbits(NV_BottomUMA);
+ if(val == 0)
+ val++;
+
+ val <<= (24-8);
+ if(val > Bottom32bIO)
+ val = Bottom32bIO;
+
+ Cache32bTOP = val;
+
+ /*======================================================================
+ Set default values for CPU registers
+ ======================================================================*/
+
+ /* NOTE : For LinuxBIOS, we don't need to set mtrr enables here because
+	they are still enabled from cache_as_ram.inc */
+
+ addr = 0x250;
+ lo = 0x1E1E1E1E;
+ hi = lo;
+ _WRMSR(addr, lo, hi); /* 0 - 512K = WB Mem */
+ addr = 0x258;
+ _WRMSR(addr, lo, hi); /* 512K - 640K = WB Mem */
+
+ /*======================================================================
+ Set variable MTRR values
+ ======================================================================*/
+ /* NOTE: for LinuxBIOS change from 0x200 to 0x204: LinuxBIOS is using
+ 0x200, 0x201 for [1M, CONFIG_TOP_MEM)
+ 0x202, 0x203 for ROM Caching
+ */
+ addr = 0x204; /* MTRR phys base 2*/
+ /* use TOP_MEM as limit*/
+ /* Limit=TOP_MEM|TOM2*/
+ /* Base=0*/
+ print_tx("\t CPUMemTyping: Cache32bTOP:", Cache32bTOP);
+ SetMTRRrangeWB_D(0, &Cache32bTOP, &addr);
+ /* Base */
+ /* Limit */
+ /* MtrrAddr */
+ if(addr == -1) /* ran out of MTRRs?*/
+ pMCTstat->GStatus |= 1<<GSB_MTRRshort;
+
+ pMCTstat->Sub4GCacheTop = Cache32bTOP<<8;
+
+ /*======================================================================
+ Set TOP_MEM and TOM2 CPU registers
+ ======================================================================*/
+ addr = TOP_MEM;
+ lo = Bottom32bIO<<8;
+ hi = Bottom32bIO>>24;
+ _WRMSR(addr, lo, hi);
+ print_tx("\t CPUMemTyping: Bottom32bIO:", Bottom32bIO);
+ print_tx("\t CPUMemTyping: Bottom40bIO:", Bottom40bIO);
+ if(Bottom40bIO) {
+ hi = Bottom40bIO >> 24;
+ lo = Bottom40bIO << 8;
+ addr += 3; /* TOM2 */
+ _WRMSR(addr, lo, hi);
+ }
+ addr = 0xC0010010; /* SYS_CFG */
+ _RDMSR(addr, &lo, &hi);
+ if(Bottom40bIO) {
+ lo |= (1<<21); /* MtrrTom2En=1 */
+ lo |= (1<<22); /* Tom2ForceMemTypeWB */
+ } else {
+ lo &= ~(1<<21); /* MtrrTom2En=0 */
+ lo &= ~(1<<22); /* Tom2ForceMemTypeWB */
+ }
+ _WRMSR(addr, lo, hi);
+}
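
Most of the address bookkeeping in CPUMemTyping_D() (Bottom32bIO, Bottom40bIO, Cache32bTOP, SysLimit) uses a "right justified by 8 bits" (RJ8) convention: a 40-bit physical address kept in a 32-bit variable as addr >> 8, i.e. in 256-byte units. A small sketch of the convention, assuming NV_BottomIO is expressed in 16 MB units, which is what the << (24-8) shift above implies:

    #include <stdint.h>
    #include <stdio.h>

    /* Convert an RJ8 value back to a full physical address. */
    static uint64_t rj8_to_phys(uint32_t rj8)
    {
        return (uint64_t)rj8 << 8;
    }

    int main(void)
    {
        uint32_t bottom_io_nv = 0xE0;                    /* hypothetical NV_BottomIO: 0xE0 * 16 MB = 3.5 GB */
        uint32_t Bottom32bIO  = bottom_io_nv << (24 - 8); /* RJ8 form, as in the code above */

        printf("Bottom32bIO (RJ8) = 0x%08x\n", Bottom32bIO);   /* 0x00E00000 */
        printf("physical address  = 0x%010llx\n",
               (unsigned long long)rj8_to_phys(Bottom32bIO));  /* 0x0E0000000 = 3.5 GB */
        return 0;
    }
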
+
+
+static void SetMTRRrangeWB_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr)
+{
+ /*set WB type*/
+ SetMTRRrange_D(Base, pLimit, pMtrrAddr, 6);
+}
+
+
+static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
+{
+ /* Program MTRRs to describe given range as given cache type.
+ * Use MTRR pairs starting with the given MTRRphys Base address,
+ * and use as many as is required up to (excluding) MSR 020C, which
+ * is reserved for OS.
+ *
+ * "Limit" in the context of this procedure is not the numerically
+ * correct limit, but rather the Last address+1, for purposes of coding
+ * efficiency and readability. Size of a region is then Limit-Base.
+ *
+ * 1. Size of each range must be a power of two
+	 * 2. Each range must be naturally aligned (Base is a multiple of the size)
+ *
+ * There are two code paths: the ascending path and descending path
+	 * (analogous to bsf and bsr), where the next limit is a function of the
+ * next set bit in a forward or backward sequence of bits (as a function
+ * of the Limit). We start with the ascending path, to ensure that
+ * regions are naturally aligned, then we switch to the descending path
+ * to maximize MTRR usage efficiency. Base=0 is a special case where we
+ * start with the descending path. Correct Mask for region is
+ * 2comp(Size-1)-1, which is 2comp(Limit-Base-1)-1
+ */
+
+ u32 curBase, curLimit, curSize;
+ u32 val, valx;
+ u32 addr;
+
+ val = curBase = Base;
+ curLimit = *pLimit;
+ addr = *pMtrrAddr;
+ while((addr >= 0x200) && (addr < 0x20C) && (val < *pLimit)) {
+ /* start with "ascending" code path */
+ /* alignment (largest block size)*/
+ valx = 1 << bsf(curBase);
+ curSize = valx;
+
+ /* largest legal limit, given current non-zero range Base*/
+ valx += curBase;
+ if((curBase == 0) || (*pLimit < valx)) {
+ /* flop direction to "descending" code path*/
+ valx = 1<<bsr(*pLimit - curBase);
+ curSize = valx;
+ valx += curBase;
+ }
+ curLimit = valx; /*eax=curBase, edx=curLimit*/
+ valx = val>>24;
+ val <<= 8;
+
+ /* now program the MTRR */
+ val |= MtrrType; /* set cache type (UC or WB)*/
+ _WRMSR(addr, val, valx); /* prog. MTRR with current region Base*/
+ val = ((~(curSize - 1))+1) - 1; /* Size-1*/ /*Mask=2comp(Size-1)-1*/
+		valx = (val >> 24) | (0xff00); /* GH has 48-bit addresses */
+ val <<= 8;
+ val |= ( 1 << 11); /* set MTRR valid*/
+ addr++;
+ _WRMSR(addr, val, valx); /* prog. MTRR with current region Mask*/
+ val = curLimit;
+ curBase = val; /* next Base = current Limit (loop exit)*/
+ addr++; /* next MTRR pair addr */
+ }
+ if(val < *pLimit) {
+ *pLimit = val;
+ addr = -1;
+ }
+ *pMtrrAddr = addr;
+}
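
The comment block in SetMTRRrange_D() describes splitting a range into naturally aligned power-of-two blocks, walking up from Base (ascending, bsf) and flipping to the largest block that still fits (descending, bsr). A standalone sketch of that splitting logic, with bsf/bsr mapped to GCC builtins, no MSR budget check, and addresses in the same RJ8 units; it prints the base/size pairs it would program instead of issuing WRMSR:

    #include <stdint.h>
    #include <stdio.h>

    static int lowest_set(uint32_t v)  { return __builtin_ctz(v); }
    static int highest_set(uint32_t v) { return 31 - __builtin_clz(v); }

    /* Emit naturally aligned power-of-two blocks covering [base, limit). */
    static void split_range(uint32_t base, uint32_t limit)
    {
        while (base < limit) {
            uint32_t size = base ? (1u << lowest_set(base)) : 0;
            if (base == 0 || base + size > limit)       /* flip to descending path */
                size = 1u << highest_set(limit - base);
            printf("MTRR pair: base=0x%08x size=0x%08x (RJ8)\n", base, size);
            base += size;
        }
    }

    int main(void)
    {
        split_range(0, 0x00AC0000);  /* hypothetical WB range: 0 .. 0xAC000000 physical */
        return 0;
    }
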
+
+
diff --git a/src/northbridge/amd/amdmct/mct/mctndi_d.c b/src/northbridge/amd/amdmct/mct/mctndi_d.c
new file mode 100644
index 0000000000..8e96a567e5
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctndi_d.c
@@ -0,0 +1,237 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+
+ /* Applies Node memory interleaving if enabled and if all criteria are met. */
+ u8 Node;
+ u32 Base;
+ u32 MemSize, MemSize0 = 0;
+	u32 Dct0MemSize = 0, DctSelBase;
+ u8 Nodes;
+ u8 NodesWmem;
+ u8 DoIntlv;
+ u8 _NdIntCap;
+ u8 _SWHole;
+ u8 HWHoleSz;
+ u32 DramHoleAddrReg;
+ u32 HoleBase;
+ u32 dev0;
+ u32 reg0;
+ u32 val;
+ u8 i;
+ struct DCTStatStruc *pDCTstat;
+
+ DoIntlv = mctGet_NVbits(NV_NodeIntlv);
+
+ _NdIntCap = 0;
+ HWHoleSz = 0; /*For HW remapping, NOT Node hoisting. */
+
+ pDCTstat = pDCTstatA + 0;
+ dev0 = pDCTstat->dev_host;
+ Nodes = ((Get_NB32(dev0, 0x60) >> 4) & 0x7) + 1;
+
+
+ dev0 = pDCTstat->dev_map;
+ reg0 = 0x40;
+
+ NodesWmem = 0;
+ Node = 0;
+
+ while (DoIntlv && (Node < Nodes)) {
+ pDCTstat = pDCTstatA + Node;
+ if (pMCTstat->GStatus & (1 << GSB_SpIntRemapHole)) {
+ pMCTstat->GStatus |= 1 << GSB_HWHole;
+ _SWHole = 0;
+ } else if (pDCTstat->Status & (1 << SB_SWNodeHole)) {
+ _SWHole = 1;
+ } else {
+ _SWHole = 0;
+ }
+
+ if(!_SWHole) {
+ Base = Get_NB32(dev0, reg0);
+ if (Base & 1) {
+ NodesWmem++;
+ Base &= 0xFFFF0000; /* Base[39:8] */
+
+ if (pDCTstat->Status & (1 << SB_HWHole )) {
+
+ /* to get true amount of dram,
+ * subtract out memory hole if HW dram remapping */
+ DramHoleAddrReg = Get_NB32(pDCTstat->dev_map, 0xF0);
+ HWHoleSz = DramHoleAddrReg >> 16;
+ HWHoleSz = (((~HWHoleSz) + 1) & 0xFF);
+ HWHoleSz <<= 24-8;
+ }
+ /* check to see if the amount of memory on each channel
+ * are the same on all nodes */
+
+ DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114);
+ if(DctSelBase) {
+ DctSelBase <<= 8;
+ if ( pDCTstat->Status & (1 << SB_HWHole)) {
+ if (DctSelBase >= 0x1000000) {
+ DctSelBase -= HWHoleSz;
+ }
+ }
+					DctSelBase -= Base;
+ if (Node == 0) {
+ Dct0MemSize = DctSelBase;
+ } else if (DctSelBase != Dct0MemSize) {
+ break;
+ }
+ }
+
+ MemSize = Get_NB32(dev0, reg0 + 4);
+ MemSize &= 0xFFFF0000;
+ MemSize += 0x00010000;
+ MemSize -= Base;
+ if ( pDCTstat->Status & (1 << SB_HWHole)) {
+ MemSize -= HWHoleSz;
+ }
+ if (Node == 0) {
+ MemSize0 = MemSize;
+ } else if (MemSize0 != MemSize) {
+ break;
+ }
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ Node++;
+ reg0 += 8;
+ }
+
+ if (Node == Nodes) {
+ /* if all nodes have memory and no Node had SW memhole */
+ if (Nodes == 2 || Nodes == 4 || Nodes == 8)
+ _NdIntCap = 1;
+ }
+
+ if (!_NdIntCap)
+ DoIntlv = 0;
+
+
+ if (pMCTstat->GStatus & 1 << (GSB_SpIntRemapHole)) {
+ HWHoleSz = pMCTstat->HoleBase;
+ if (HWHoleSz == 0) {
+ HWHoleSz = mctGet_NVbits(NV_BottomIO) & 0xFF;
+ HWHoleSz <<= 24-8;
+ }
+ HWHoleSz = ((~HWHoleSz) + 1) & 0x00FF0000;
+ }
+
+ if (DoIntlv) {
+ MCTMemClr_D(pMCTstat,pDCTstatA);
+ /* Program Interleaving enabled on Node 0 map only.*/
+ MemSize0 <<= bsf(Nodes); /* MemSize=MemSize*2 (or 4, or 8) */
+ Dct0MemSize <<= bsf(Nodes);
+ MemSize0 += HWHoleSz;
+ Base = ((Nodes - 1) << 8) | 3;
+ reg0 = 0x40;
+ Node = 0;
+ while(Node < Nodes) {
+ Set_NB32(dev0, reg0, Base);
+ MemSize = MemSize0;
+ MemSize--;
+ MemSize &= 0xFFFF0000;
+ MemSize |= Node << 8; /* set IntlvSel[2:0] field */
+ MemSize |= Node; /* set DstNode[2:0] field */
+			Set_NB32(dev0, reg0 + 4, MemSize);
+ reg0 += 8;
+ Node++;
+ }
+
+ /* set base/limit to F1x120/124 per Node */
+ Node = 0;
+ while(Node < Nodes) {
+ pDCTstat = pDCTstatA + Node;
+ pDCTstat->NodeSysBase = 0;
+ MemSize = MemSize0;
+ MemSize -= HWHoleSz;
+ MemSize--;
+ pDCTstat->NodeSysLimit = MemSize;
+ Set_NB32(pDCTstat->dev_map, 0x120, Node << 21);
+ MemSize = MemSize0;
+ MemSize--;
+ MemSize >>= 19;
+ val = Base;
+ val &= 0x700;
+ val <<= 13;
+ val |= MemSize;
+ Set_NB32(pDCTstat->dev_map, 0x124, val);
+
+ if (pMCTstat->GStatus & (1 << GSB_HWHole)) {
+ HoleBase = pMCTstat->HoleBase;
+ if (Dct0MemSize >= HoleBase) {
+ val = HWHoleSz;
+ if( Node == 0) {
+ val += Dct0MemSize;
+ }
+ } else {
+ val = HWHoleSz + Dct0MemSize;
+ }
+
+ val >>= 8; /* DramHoleOffset */
+ HoleBase <<= 8; /* DramHoleBase */
+ val |= HoleBase;
+ val |= 1 << DramMemHoistValid;
+ val |= 1 << DramHoleValid;
+ Set_NB32(pDCTstat->dev_map, 0xF0, val);
+ }
+
+
+ Set_NB32(pDCTstat->dev_dct, 0x114, Dct0MemSize >> 8); /* DctSelBaseOffset */
+ val = Get_NB32(pDCTstat->dev_dct, 0x110);
+ val &= 0x7FF;
+ val |= Dct0MemSize >> 8;
+ Set_NB32(pDCTstat->dev_dct, 0x110, val); /* DctSelBaseAddr */
+ print_tx("InterleaveNodes: DRAM Controller Select Low Register = ", val);
+ Node++;
+ }
+
+
+ /* Copy Node 0 into other Nodes' CSRs */
+ Node = 1;
+ while (Node < Nodes) {
+ pDCTstat = pDCTstatA + Node;
+
+ for (i = 0x40; i <= 0x80; i++) {
+ val = Get_NB32(dev0, i);
+ Set_NB32(pDCTstat->dev_map, i, val);
+ }
+
+ val = Get_NB32(dev0, 0xF0);
+ Set_NB32(pDCTstat->dev_map, 0xF0, val);
+ Node++;
+ }
+		pMCTstat->GStatus |= (1 << GSB_NodeIntlv);
+ }
+ print_tx("InterleaveNodes_D: Status ", pDCTstat->Status);
+ print_tx("InterleaveNodes_D: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("InterleaveNodes_D: ErrCode ", pDCTstat->ErrCode);
+ print_t("InterleaveNodes_D: Done\n");
+}
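
For reference, the interleave-enable path above ends up giving every node the same DRAM Base (IntlvEn = Nodes-1 plus WE/RE) and a DRAM Limit covering the whole interleaved range, with IntlvSel and DstNode set to the node number. A minimal sketch with two nodes of 1 GB each, hole handling omitted, sizes in RJ8 units, and field positions taken from the comments in the loop above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t Nodes = 2;
        uint32_t MemSizePerNode = 0x00400000;   /* hypothetical 1 GB per node, RJ8 */
        uint32_t MemSize0 = MemSizePerNode * Nodes;
        uint32_t Base = ((Nodes - 1) << 8) | 3; /* IntlvEn[10:8], WE/RE[1:0] */

        for (uint32_t Node = 0; Node < Nodes; Node++) {
            uint32_t Limit = (MemSize0 - 1) & 0xFFFF0000;
            Limit |= Node << 8;                 /* IntlvSel[10:8] */
            Limit |= Node;                      /* DstNode[2:0] */
            printf("F1x%02X = 0x%08x  F1x%02X = 0x%08x\n",
                   0x40 + 8 * Node, Base, 0x44 + 8 * Node, Limit);
        }
        return 0;
    }
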
diff --git a/src/northbridge/amd/amdmct/mct/mctpro_d.c b/src/northbridge/amd/amdmct/mct/mctpro_d.c
new file mode 100644
index 0000000000..70d92c6b72
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctpro_d.c
@@ -0,0 +1,406 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct);
+static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly);
+
+void EarlySampleSupport_D(void)
+{
+}
+
+
+u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val)
+{
+ u32 tmp;
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ val &= 0x0FFFFFFF;
+ if(pDCTstat->MAdimms[dct] > 1)
+ val |= 0x10000000;
+ }
+
+ return val;
+}
+
+
+u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val)
+{
+	/* Bug#10695: One MEMCLK Bubble Writes Don't Do X4 X8 Switching Correctly
+ * Solution: BIOS should set DRAM Timing High[Twrwr] > 00b
+ * ( F2x[1, 0]8C[1:0] > 00b). Silicon Status: Fixed in Rev B
+ * FIXME: check if this is still required.
+ */
+ u32 tmp;
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ if(!(val & (3<<12) ))
+ val |= 1<<12;
+ }
+ return val;
+}
+
+
+void mct_ForceAutoPrecharge_D(struct DCTStatStruc *pDCTstat, u32 dct)
+{
+ u32 tmp;
+ u32 reg;
+ u32 reg_off;
+ u32 dev;
+ u32 val;
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ if(CheckNBCOFAutoPrechg(pDCTstat, dct)) {
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * dct;
+ reg = 0x90 + reg_off; /* Dram Configuration Lo */
+ val = Get_NB32(dev, reg);
+ val |= 1<<ForceAutoPchg;
+ if(!pDCTstat->GangedMode)
+ val |= 1<<BurstLength32;
+ Set_NB32(dev, reg, val);
+
+ reg = 0x88 + reg_off; /* cx=Dram Timing Lo */
+ val = Get_NB32(dev, reg);
+ val |= 0x000F0000; /* Trc = 0Fh */
+ Set_NB32(dev, reg, val);
+ }
+ }
+}
+
+
+void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ /* Bug#13341: Prefetch is getting killed when the limit is reached in
+ * PrefDramTrainMode
+ * Solution: Explicitly clear the PrefDramTrainMode bit after training
+ * sequence in order to ensure resumption of normal HW prefetch
+ * behavior.
+ * NOTE -- this has been documented with a note at the end of this
+ * section in the BKDG (although, admittedly, the note does not really
+ * stand out).
+ * Silicon Status: Fixed in Rev B ( confirm)
+ * FIXME: check this.
+ */
+
+ u32 tmp;
+ u32 dev;
+ u32 reg;
+ u32 val;
+ u32 Node;
+
+ for(Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if(!pDCTstat->NodePresent) break;
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ dev = pDCTstat->dev_dct;
+ reg = 0x11c;
+ val = Get_NB32(dev, reg);
+ val &= ~(1<<PrefDramTrainMode);
+ Set_NB32(dev, reg, val);
+ }
+ }
+}
+
+
+
+
+void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
+ * In TX FIFO
+ * Solution: BIOS should program DRAM Control Register[RdPtrInit] = 5h,
+ * (F2x[1, 0]78[3:0] = 5h).
+ * Silicon Status: Fixed In Rev B0
+ */
+
+ /* Bug#15880: Determine validity of reset settings for DDR PHY timing
+	 * registers.
+	 * Solution: At a minimum, set the WrDqs fine delay to 0 for DDR2 training.
+ */
+
+ u32 dev;
+ u32 reg_off;
+ u32 index_reg;
+ u32 index;
+ u32 reg;
+ u32 val;
+ u32 tmp;
+ u32 Channel;
+
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+
+ dev = pDCTstat->dev_dct;
+ index = 0;
+
+ for(Channel = 0; Channel<2; Channel++) {
+ index_reg = 0x98 + 0x100 * Channel;
+ val = Get_NB32_index_wait(dev, index_reg, 0x0d004007);
+ val |= 0x3ff;
+ Set_NB32_index_wait(dev, index_reg, 0x0d0f4f07, val);
+ }
+
+ for(Channel = 0; Channel<2; Channel++) {
+ if(pDCTstat->GangedMode && Channel)
+ break;
+ reg_off = 0x100 * Channel;
+ reg = 0x78 + reg_off;
+ val = Get_NB32(dev, reg);
+ val &= ~(0x07);
+ val |= 5;
+ Set_NB32(dev, reg, val);
+ }
+
+ for(Channel = 0; Channel<2; Channel++) {
+ reg_off = 0x100 * Channel;
+ val = 0;
+ index_reg = 0x98 + reg_off;
+ for( index = 0x30; index < (0x45 + 1); index++) {
+ Set_NB32_index_wait(dev, index_reg, index, val);
+ }
+ }
+
+ }
+}
+
+
+u32 Modify_D3CMP(struct DCTStatStruc *pDCTstat, u32 dct, u32 value)
+{
+ /* Errata#189: Reads To Phy Driver Calibration Register and Phy
+ * Predriver Calibration Register Do Not Return Bit 27.
+ * Solution: See #41322 for details.
+ * BIOS can modify bit 27 of the Phy Driver Calibration register
+ * as follows:
+ * 1. Read F2x[1, 0]9C_x09
+ * 2. Read F2x[1, 0]9C_x0D004201
+ * 3. Set F2x[1, 0]9C_x09[27] = F2x[1, 0]9C_x0D004201[10]
+ * BIOS can modify bit 27 of the Phy Predriver Calibration register
+ * as follows:
+ * 1. Read F2x[1, 0]9C_x0A
+ * 2. Read F2x[1, 0]9C_x0D004209
+ * 3. Set F2x[1, 0]9C_x0A[27] = F2x[1, 0]9C_x0D004209[10]
+ * Silicon Status: Fixed planned for DR-B0
+ */
+
+ u32 dev;
+ u32 index_reg;
+ u32 index;
+ u32 val;
+ u32 tmp;
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ dev = pDCTstat->dev_dct;
+ index_reg = 0x98 + 0x100 * dct;
+ index = 0x0D004201;
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ value &= ~(1<<27);
+ value |= ((val>>10) & 1) << 27;
+ }
+ return value;
+}
+
+
+void SyncSetting(struct DCTStatStruc *pDCTstat)
+{
+ /* Errata#198: AddrCmdSetup, CsOdtSetup, and CkeSetup Require Identical
+ * Programming For Both Channels in Ganged Mode
+ * Solution: The BIOS must program the following DRAM timing parameters
+ * the same for both channels:
+ * 1. F2x[1, 0]9C_x04[21] (AddrCmdSetup)
+ * 2. F2x[1, 0]9C_x04[15] (CsOdtSetup)
+ * 3. F2x[1, 0]9C_x04[5]) (CkeSetup)
+ * That is, if the AddrCmdSetup, CsOdtSetup, or CkeSetup is
+ * set to 1'b1 for one of the controllers, then the corresponding
+ * AddrCmdSetup, CsOdtSetup, or CkeSetup must be set to 1'b1 for the
+ * other controller.
+ * Silicon Status: Fix TBD
+ */
+
+ u32 tmp;
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ pDCTstat->CH_ODC_CTL[1] = pDCTstat->CH_ODC_CTL[0];
+ pDCTstat->CH_ADDR_TMG[1] = pDCTstat->CH_ADDR_TMG[0];
+ }
+}
+
+
+static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
+{
+ u32 ret = 0;
+ u32 lo, hi;
+ u32 msr;
+ u32 val;
+ u32 valx, valy;
+ u32 NbDid;
+
+ /* 3 * (Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
+ msr = 0xC0010071;
+ _RDMSR(msr, &lo, &hi);
+ NbDid = (lo>>22) & 1;
+
+ val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100 * dct);
+ valx = ((val & 0x07) + 3)<<NbDid;
+ print_tx("MemClk:", valx >> NbDid);
+
+ val = Get_NB32(pDCTstat->dev_nbmisc, 0xd4);
+ valy = ((val & 0x1f) + 4) * 3;
+ print_tx("NB COF:", valy >> NbDid);
+
+ val = valy/valx;
+ if((val==3) && (valy%valx)) /* 3 < NClk/MemClk < 4 */
+ ret = 1;
+
+ return ret;
+}
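
CheckNBCOFAutoPrechg() applies the workaround only when 3 < NClk/MemClk < 4, per the formula in its comment. A worked example with hypothetical register values, assuming (as the formula implies) that NB COF is 200 MHz * (NbFid + 4) and MEMCLK is roughly 66 MHz * (MemClkFreq + 3):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t MemClkFreq = 3;    /* hypothetical F2x94[2:0]: 400 MHz MEMCLK (DDR2-800) */
        uint32_t NbFid = 3;         /* hypothetical F3xD4[4:0]: NB COF = (3+4)*200 = 1400 MHz */
        uint32_t NbDid = 0;

        uint32_t memclk = (MemClkFreq + 3) << NbDid;   /* 6, in ~66 MHz units */
        uint32_t nclk   = (NbFid + 4) * 3;             /* 21, in ~66 MHz units */

        if (nclk / memclk == 3 && nclk % memclk)       /* 3 < NClk/MemClk < 4 */
            printf("ratio 3.x: apply ForceAutoPchg workaround\n");
        else
            printf("ratio %u.%02u: no workaround needed\n",
                   nclk / memclk, (nclk % memclk) * 100 / memclk);
        return 0;
    }
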
+
+
+void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct)
+{
+ u32 tmp;
+ u32 Speed;
+ u32 ch, ch_start, ch_end;
+ u32 index_reg;
+ u32 index;
+ u32 dev;
+ u32 val;
+
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ Speed = pDCTstat->Speed;
+ /* MemClkFreq = 333MHz or 533Mhz */
+ if((Speed == 3) || (Speed == 2)) {
+ if(pDCTstat->GangedMode) {
+ ch_start = 0;
+ ch_end = 2;
+ } else {
+ ch_start = dct;
+ ch_end = dct+1;
+ }
+ dev = pDCTstat->dev_dct;
+ index = 0x0D00E001;
+ for(ch=ch_start; ch<ch_end; ch++) {
+ index_reg = 0x98 + 0x100 * ch;
+ val = Get_NB32_index(dev, index_reg, 0x0D00E001);
+ val &= ~(0xf0);
+ val |= 0x80;
+ Set_NB32_index(dev, index_reg, 0x0D01E001, val);
+ }
+ }
+
+ }
+}
+
+
+static u8 mct_AdjustDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
+{
+ u8 skip = 0;
+ dly &= 0x1f;
+ if ((dly >= MIN_FENCE) && (dly <= MAX_FENCE))
+ skip = 1;
+
+ return skip;
+}
+
+
+static u8 mct_checkFenceHoleAdjust_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 DQSDelay,
+ u8 ChipSel, u8 *result)
+{
+ u8 ByteLane;
+ u32 tmp;
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ if (pDCTstat->Direction == DQS_WRITEDIR) {
+ if ((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) {
+ if(DQSDelay == 13) {
+ if (*result == 0xFF) {
+ for (ByteLane = 0; ByteLane < 8; ByteLane++) {
+ pDCTstat->DQSDelay = 13;
+ pDCTstat->ByteLane = ByteLane;
+ /* store the value into the data structure */
+ StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
+ }
+ return 1;
+ }
+ }
+ }
+ if (mct_AdjustDQSPosDelay_D(pDCTstat, DQSDelay)) {
+ *result = 0;
+ }
+ }
+ }
+ return 0;
+}
+
+
+static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
+{
+ u8 skip = 0;
+
+ dly &= 0x1f;
+ if ((dly >= MIN_DQS_WR_FENCE) && (dly <= MAX_DQS_WR_FENCE))
+ skip = 1;
+
+ return skip;
+
+}
+
+static void beforeInterleaveChannels_D(struct DCTStatStruc *pDCTstatA, u8 *enabled) {
+
+ if (pDCTstatA->LogicalCPUID & (AMD_DR_Ax))
+ *enabled = 0;
+}
+
+
+static u8 mctDoAxRdPtrInit_D(struct DCTStatStruc *pDCTstat, u8 *Rdtr)
+{
+ u32 tmp;
+
+ tmp = pDCTstat->LogicalCPUID;
+ if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
+ *Rdtr = 5;
+ return 1;
+ }
+ return 0;
+}
+
+
+static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request) {
+
+ /* Erratum #202: disable DCache scrubber for Ax parts */
+
+ if (pDCTstat->LogicalCPUID & (AMD_DR_Ax)) {
+ *scrub_request = 0;
+ pDCTstat->ErrStatus |= 1 << SB_DCBKScrubDis;
+ }
+}
+
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc.c b/src/northbridge/amd/amdmct/mct/mctsrc.c
new file mode 100644
index 0000000000..c781ffd6b0
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctsrc.c
@@ -0,0 +1,1121 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/******************************************************************************
+ Description: Receiver En and DQS Timing Training feature for DDR 2 MCT
+******************************************************************************/
+
+static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Pass);
+static u8 mct_SavePassRcvEnDly_D(struct DCTStatStruc *pDCTstat,
+ u8 rcvrEnDly, u8 Channel,
+ u8 receiver, u8 Pass);
+static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 addr, u8 channel,
+ u8 pattern, u8 Pass);
+static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel);
+static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel);
+static void mct_SetFinalRcvrEnDly_D(struct DCTStatStruc *pDCTstat,
+ u8 RcvrEnDly, u8 where,
+ u8 Channel, u8 Receiver,
+ u32 dev, u32 index_reg,
+ u8 Addl_Index, u8 Pass);
+static void CalcMaxLatency_D(struct DCTStatStruc *pDCTstat,
+ u8 DQSRcvrEnDly, u8 Channel);
+static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQSRcvEnDly);
+static void mct_SetDQSRcvEn_D(struct DCTStatStruc *pDCTstat, u32 val);
+static void fenceDynTraining_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct);
+static void mct_DisableDQSRcvEn_D(struct DCTStatStruc *pDCTstat);
+
+
+/* Warning: These must be located so they do not cross a logical 16-bit
+ segment boundary! */
+const static u32 TestPattern0_D[] = {
+ 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa,
+};
+const static u32 TestPattern1_D[] = {
+ 0x55555555, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x55555555, 0x55555555, 0x55555555,
+};
+const static u32 TestPattern2_D[] = {
+ 0x12345678, 0x87654321, 0x23456789, 0x98765432,
+ 0x59385824, 0x30496724, 0x24490795, 0x99938733,
+ 0x40385642, 0x38465245, 0x29432163, 0x05067894,
+ 0x12349045, 0x98723467, 0x12387634, 0x34587623,
+};
+
+static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u32 *buffer, u8 pass)
+{
+ /*
+ * 1. Copy the alpha and Beta patterns from ROM to Cache,
+ * aligning on 16 byte boundary
+ * 2. Set the ptr to DCTStatstruc.PtrPatternBufA for Alpha
+ * 3. Set the ptr to DCTStatstruc.PtrPatternBufB for Beta
+ */
+
+ u32 *buf_a;
+ u32 *buf_b;
+ u32 *p_A;
+ u32 *p_B;
+ u8 i;
+
+ buf_a = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
+ buf_b = buf_a + 32; //??
+ p_A = (u32 *)SetupDqsPattern_1PassB(pass);
+ p_B = (u32 *)SetupDqsPattern_1PassA(pass);
+
+ for(i=0;i<16;i++) {
+ buf_a[i] = p_A[i];
+ buf_b[i] = p_B[i];
+ }
+
+ pDCTstat->PtrPatternBufA = (u32)buf_a;
+ pDCTstat->PtrPatternBufB = (u32)buf_b;
+}
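
SetupRcvrPattern() carves a 16-byte-aligned working copy out of the caller's PatternBuffer with the "+ 0x10, mask the low bits" trick, which is why the buffer is declared a few dwords larger than the patterns need. A standalone sketch of the same trick, using uintptr_t in place of the firmware's 32-bit pointer cast:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t buffer[64 + 4];                            /* extra slack for alignment */
        uintptr_t raw = (uintptr_t)buffer;
        uintptr_t aligned = (raw + 0x10) & ~(uintptr_t)0x0F; /* next 16-byte boundary */

        printf("raw     = %p\n", (void *)raw);
        printf("aligned = %p (offset %u into the buffer)\n",
               (void *)aligned, (unsigned)(aligned - raw));
        return 0;
    }
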
+
+
+void mct_TrainRcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Pass)
+{
+ if(mct_checkNumberOfDqsRcvEn_1Pass(Pass))
+ dqsTrainRcvrEn_SW(pMCTstat, pDCTstat, Pass);
+}
+
+
+static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Pass)
+{
+ u8 Channel, RcvrEnDly, RcvrEnDlyRmin;
+ u8 Test0, Test1, CurrTest, CurrTestSide0, CurrTestSide1;
+ u8 CTLRMaxDelay, _2Ranks, PatternA, PatternB;
+ u8 Addl_Index = 0;
+ u8 Receiver;
+ u8 _DisableDramECC = 0, _Wrap32Dis = 0, _SSE2 = 0;
+ u8 RcvrEnDlyLimit, Final_Value, MaxDelay_CH[2];
+ u32 TestAddr0, TestAddr1, TestAddr0B, TestAddr1B;
+ u32 PatternBuffer[64+4]; /* FIXME: need increase 8? */
+ u32 Errors;
+
+ u32 val;
+ u32 reg;
+ u32 dev;
+ u32 index_reg;
+ u32 ch_start, ch_end, ch;
+ u32 msr;
+ u32 cr4;
+ u32 lo, hi;
+
+ u8 valid;
+ u32 tmp;
+ u8 LastTest;
+
+ print_debug_dqs("\nTrainRcvEn: Node", pDCTstat->Node_ID, 0);
+ print_debug_dqs("TrainRcvEn: Pass", Pass, 0);
+
+
+ dev = pDCTstat->dev_dct;
+ ch_start = 0;
+ if(!pDCTstat->GangedMode) {
+ ch_end = 2;
+ } else {
+ ch_end = 1;
+ }
+
+ for (ch = ch_start; ch < ch_end; ch++) {
+ reg = 0x78 + (0x100 * ch);
+ val = Get_NB32(dev, reg);
+ val &= ~(0x3ff << 22);
+ val |= (0x0c8 << 22); /* Max Rd Lat */
+ Set_NB32(dev, reg, val);
+ }
+
+ Final_Value = 1;
+ if (Pass == FirstPass) {
+ mct_InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat);
+ } else {
+ pDCTstat->DimmTrainFail = 0;
+ pDCTstat->CSTrainFail = ~pDCTstat->CSPresent;
+ }
+ print_t("TrainRcvrEn: 1\n");
+
+ cr4 = read_cr4();
+ if(cr4 & ( 1 << 9)) { /* save the old value */
+ _SSE2 = 1;
+ }
+ cr4 |= (1 << 9); /* OSFXSR enable SSE2 */
+ write_cr4(cr4);
+ print_t("TrainRcvrEn: 2\n");
+
+ msr = HWCR;
+ _RDMSR(msr, &lo, &hi);
+ //FIXME: Why use SSEDIS
+ if(lo & (1 << 17)) { /* save the old value */
+ _Wrap32Dis = 1;
+ }
+ lo |= (1 << 17); /* HWCR.wrap32dis */
+ lo &= ~(1 << 15); /* SSEDIS */
+ _WRMSR(msr, lo, hi); /* Setting wrap32dis allows 64-bit memory references in real mode */
+ print_t("TrainRcvrEn: 3\n");
+
+ _DisableDramECC = mct_DisableDimmEccEn_D(pMCTstat, pDCTstat);
+
+
+ if(pDCTstat->Speed == 1) {
+		pDCTstat->T1000 = 5000;	/* get the T1000 figure (cycle time in ns * 1K) */
+ } else if(pDCTstat->Speed == 2) {
+ pDCTstat->T1000 = 3759;
+ } else if(pDCTstat->Speed == 3) {
+ pDCTstat->T1000 = 3003;
+ } else if(pDCTstat->Speed == 4) {
+ pDCTstat->T1000 = 2500;
+ } else if(pDCTstat->Speed == 5) {
+ pDCTstat->T1000 = 1876;
+ } else {
+ pDCTstat->T1000 = 0;
+ }
+
+ SetupRcvrPattern(pMCTstat, pDCTstat, PatternBuffer, Pass);
+ print_t("TrainRcvrEn: 4\n");
+
+ Errors = 0;
+ dev = pDCTstat->dev_dct;
+ CTLRMaxDelay = 0;
+
+ for (Channel = 0; Channel < 2; Channel++) {
+ print_debug_dqs("\tTrainRcvEn51: Node ", pDCTstat->Node_ID, 1);
+ print_debug_dqs("\tTrainRcvEn51: Channel ", Channel, 1);
+ pDCTstat->Channel = Channel;
+
+ MaxDelay_CH[Channel] = 0;
+ index_reg = 0x98 + 0x100 * Channel;
+
+ Receiver = mct_InitReceiver_D(pDCTstat, Channel);
+ /* There are four receiver pairs, loosely associated with chipselects. */
+ for (; Receiver < 8; Receiver += 2) {
+ Addl_Index = (Receiver >> 1) * 3 + 0x10;
+ LastTest = DQS_FAIL;
+
+ /* mct_ModifyIndex_D */
+ RcvrEnDlyRmin = RcvrEnDlyLimit = 0xff;
+
+ print_debug_dqs("\t\tTrainRcvEnd52: index ", Addl_Index, 2);
+
+ if(!mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver)) {
+ print_t("\t\t\tRank not enabled_D\n");
+ continue;
+ }
+
+ TestAddr0 = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, Channel, Receiver, &valid);
+ if(!valid) { /* Address not supported on current CS */
+ print_t("\t\t\tAddress not supported on current CS\n");
+ continue;
+ }
+
+ TestAddr0B = TestAddr0 + (BigPagex8_RJ8 << 3);
+
+ if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) {
+ TestAddr1 = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, Channel, Receiver+1, &valid);
+ if(!valid) { /* Address not supported on current CS */
+ print_t("\t\t\tAddress not supported on current CS+1\n");
+ continue;
+ }
+ TestAddr1B = TestAddr1 + (BigPagex8_RJ8 << 3);
+ _2Ranks = 1;
+ } else {
+ _2Ranks = TestAddr1 = TestAddr1B = 0;
+ }
+
+ print_debug_dqs("\t\tTrainRcvEn53: TestAddr0 ", TestAddr0, 2);
+ print_debug_dqs("\t\tTrainRcvEn53: TestAddr0B ", TestAddr0B, 2);
+ print_debug_dqs("\t\tTrainRcvEn53: TestAddr1 ", TestAddr1, 2);
+ print_debug_dqs("\t\tTrainRcvEn53: TestAddr1B ", TestAddr1B, 2);
+
+ /*
+ * Get starting RcvrEnDly value
+ */
+ RcvrEnDly = mct_Get_Start_RcvrEnDly_1Pass(Pass);
+
+ /* mct_GetInitFlag_D*/
+ if (Pass == FirstPass) {
+ pDCTstat->DqsRcvEn_Pass = 0;
+ } else {
+ pDCTstat->DqsRcvEn_Pass=0xFF;
+ }
+ pDCTstat->DqsRcvEn_Saved = 0;
+
+
+ while(RcvrEnDly < RcvrEnDlyLimit) { /* sweep Delay value here */
+ print_debug_dqs("\t\t\tTrainRcvEn541: RcvrEnDly ", RcvrEnDly, 3);
+
+ /* callback not required
+ if(mct_AdjustDelay_D(pDCTstat, RcvrEnDly))
+ goto skipDly;
+ */
+
+ /* Odd steps get another pattern such that even
+ and odd steps alternate. The pointers to the
+				   patterns will be swapped at the end of the loop
+ so that they correspond. */
+ if(RcvrEnDly & 1) {
+ PatternA = 1;
+ PatternB = 0;
+ } else {
+ /* Even step */
+ PatternA = 0;
+ PatternB = 1;
+ }
+
+ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0, PatternA); /* rank 0 of DIMM, testpattern 0 */
+ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0B, PatternB); /* rank 0 of DIMM, testpattern 1 */
+ if(_2Ranks) {
+ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1, PatternA); /*rank 1 of DIMM, testpattern 0 */
+ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1B, PatternB); /*rank 1 of DIMM, testpattern 1 */
+ }
+
+ mct_SetRcvrEnDly_D(pDCTstat, RcvrEnDly, 0, Channel, Receiver, dev, index_reg, Addl_Index, Pass);
+
+ CurrTest = DQS_FAIL;
+ CurrTestSide0 = DQS_FAIL;
+ CurrTestSide1 = DQS_FAIL;
+
+ mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0); /*cache fills */
+ Test0 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr0, Channel, PatternA, Pass);/* ROM vs cache compare */
+ proc_IOCLFLUSH_D(TestAddr0);
+ ResetDCTWrPtr_D(dev, index_reg, Addl_Index);
+
+ print_debug_dqs("\t\t\tTrainRcvEn542: Test0 result ", Test0, 3);
+
+				// != 0x00 means pass
+
+ if(Test0 == DQS_PASS) {
+ mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0B); /*cache fills */
+ /* ROM vs cache compare */
+ Test1 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr0B, Channel, PatternB, Pass);
+ proc_IOCLFLUSH_D(TestAddr0B);
+ ResetDCTWrPtr_D(dev, index_reg, Addl_Index);
+
+ print_debug_dqs("\t\t\tTrainRcvEn543: Test1 result ", Test1, 3);
+
+ if(Test1 == DQS_PASS) {
+ CurrTestSide0 = DQS_PASS;
+ }
+ }
+ if(_2Ranks) {
+ mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1); /*cache fills */
+ /* ROM vs cache compare */
+ Test0 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr1, Channel, PatternA, Pass);
+ proc_IOCLFLUSH_D(TestAddr1);
+ ResetDCTWrPtr_D(dev, index_reg, Addl_Index);
+
+ print_debug_dqs("\t\t\tTrainRcvEn544: Test0 result ", Test0, 3);
+
+ if(Test0 == DQS_PASS) {
+ mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1B); /*cache fills */
+ /* ROM vs cache compare */
+ Test1 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr1B, Channel, PatternB, Pass);
+ proc_IOCLFLUSH_D(TestAddr1B);
+ ResetDCTWrPtr_D(dev, index_reg, Addl_Index);
+
+ print_debug_dqs("\t\t\tTrainRcvEn545: Test1 result ", Test1, 3);
+ if(Test1 == DQS_PASS) {
+ CurrTestSide1 = DQS_PASS;
+ }
+ }
+ }
+
+ if(_2Ranks) {
+ if ((CurrTestSide0 == DQS_PASS) && (CurrTestSide1 == DQS_PASS)) {
+ CurrTest = DQS_PASS;
+ }
+ } else if (CurrTestSide0 == DQS_PASS) {
+ CurrTest = DQS_PASS;
+ }
+
+
+ /* record first pass DqsRcvEn to stack */
+ valid = mct_SavePassRcvEnDly_D(pDCTstat, RcvrEnDly, Channel, Receiver, Pass);
+
+				/* Break(1:RevF,2:DR) or not(0) FIXME: This comment doesn't make sense */
+ if(valid == 2 || (LastTest == DQS_FAIL && valid == 1)) {
+ RcvrEnDlyRmin = RcvrEnDly;
+ break;
+ }
+
+ LastTest = CurrTest;
+
+ /* swap the rank 0 pointers */
+ tmp = TestAddr0;
+ TestAddr0 = TestAddr0B;
+ TestAddr0B = tmp;
+
+ /* swap the rank 1 pointers */
+ tmp = TestAddr1;
+ TestAddr1 = TestAddr1B;
+ TestAddr1B = tmp;
+
+ print_debug_dqs("\t\t\tTrainRcvEn56: RcvrEnDly ", RcvrEnDly, 3);
+
+ RcvrEnDly++;
+
+ } /* while RcvrEnDly */
+
+ print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDly ", RcvrEnDly, 2);
+ print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDlyRmin ", RcvrEnDlyRmin, 3);
+ print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDlyLimit ", RcvrEnDlyLimit, 3);
+ if(RcvrEnDlyRmin == RcvrEnDlyLimit) {
+ /* no passing window */
+ pDCTstat->ErrStatus |= 1 << SB_NORCVREN;
+ Errors |= 1 << SB_NORCVREN;
+ pDCTstat->ErrCode = SC_FatalErr;
+ }
+
+ if(RcvrEnDly > (RcvrEnDlyLimit - 1)) {
+ /* passing window too narrow, too far delayed*/
+ pDCTstat->ErrStatus |= 1 << SB_SmallRCVR;
+ Errors |= 1 << SB_SmallRCVR;
+ pDCTstat->ErrCode = SC_FatalErr;
+ RcvrEnDly = RcvrEnDlyLimit - 1;
+ pDCTstat->CSTrainFail |= 1 << Receiver;
+ pDCTstat->DimmTrainFail |= 1 << (Receiver + Channel);
+ }
+
+ // CHB_D0_B0_RCVRDLY set in mct_Average_RcvrEnDly_Pass
+ mct_Average_RcvrEnDly_Pass(pDCTstat, RcvrEnDly, RcvrEnDlyLimit, Channel, Receiver, Pass);
+
+ mct_SetFinalRcvrEnDly_D(pDCTstat, RcvrEnDly, Final_Value, Channel, Receiver, dev, index_reg, Addl_Index, Pass);
+
+ if(pDCTstat->ErrStatus & (1 << SB_SmallRCVR)) {
+ Errors |= 1 << SB_SmallRCVR;
+ }
+
+ RcvrEnDly += Pass1MemClkDly;
+ if(RcvrEnDly > CTLRMaxDelay) {
+ CTLRMaxDelay = RcvrEnDly;
+ }
+
+ } /* while Receiver */
+
+ MaxDelay_CH[Channel] = CTLRMaxDelay;
+ } /* for Channel */
+
+ CTLRMaxDelay = MaxDelay_CH[0];
+ if (MaxDelay_CH[1] > CTLRMaxDelay)
+ CTLRMaxDelay = MaxDelay_CH[1];
+
+ for (Channel = 0; Channel < 2; Channel++) {
+ mct_SetMaxLatency_D(pDCTstat, Channel, CTLRMaxDelay); /* program Ch A/B MaxAsyncLat to correspond with max delay */
+ }
+
+ ResetDCTWrPtr_D(dev, index_reg, Addl_Index);
+
+ if(_DisableDramECC) {
+ mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC);
+ }
+
+ if (Pass == FirstPass) {
+ /*Disable DQSRcvrEn training mode */
+ print_t("TrainRcvrEn: mct_DisableDQSRcvEn_D\n");
+ mct_DisableDQSRcvEn_D(pDCTstat);
+ }
+
+ if(!_Wrap32Dis) {
+ msr = HWCR;
+ _RDMSR(msr, &lo, &hi);
+ lo &= ~(1<<17); /* restore HWCR.wrap32dis */
+ _WRMSR(msr, lo, hi);
+ }
+ if(!_SSE2){
+ cr4 = read_cr4();
+ cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
+ write_cr4(cr4);
+ }
+
+#if DQS_TRAIN_DEBUG > 0
+ {
+ u8 Channel;
+ print_debug("TrainRcvrEn: CH_MaxRdLat:\n");
+ for(Channel = 0; Channel<2; Channel++) {
+ print_debug("Channel:"); print_debug_hex8(Channel);
+ print_debug(": ");
+ print_debug_hex8( pDCTstat->CH_MaxRdLat[Channel] );
+ print_debug("\n");
+ }
+ }
+#endif
+
+#if DQS_TRAIN_DEBUG > 0
+ {
+ u8 val;
+ u8 Channel, Receiver;
+ u8 i;
+ u8 *p;
+
+ print_debug("TrainRcvrEn: CH_D_B_RCVRDLY:\n");
+ for(Channel = 0; Channel < 2; Channel++) {
+ print_debug("Channel:"); print_debug_hex8(Channel); print_debug("\n");
+ for(Receiver = 0; Receiver<8; Receiver+=2) {
+ print_debug("\t\tReceiver:");
+ print_debug_hex8(Receiver);
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver>>1];
+ print_debug(": ");
+ for (i=0;i<8; i++) {
+ val = p[i];
+ print_debug_hex8(val);
+ print_debug(" ");
+ }
+ print_debug("\n");
+ }
+ }
+ }
+#endif
+
+ print_tx("TrainRcvrEn: Status ", pDCTstat->Status);
+ print_tx("TrainRcvrEn: ErrStatus ", pDCTstat->ErrStatus);
+ print_tx("TrainRcvrEn: ErrCode ", pDCTstat->ErrCode);
+ print_t("TrainRcvrEn: Done\n");
+}
+
+
+static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
+ return 8;
+ } else {
+ return 0;
+ }
+}
+
+
+static void mct_SetFinalRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, u8 where, u8 Channel, u8 Receiver, u32 dev, u32 index_reg, u8 Addl_Index, u8 Pass/*, u8 *p*/)
+{
+ /*
+ * Program final DqsRcvEnDly to additional index for DQS receiver
+ * enabled delay
+ */
+ mct_SetRcvrEnDly_D(pDCTstat, RcvrEnDly, where, Channel, Receiver, dev, index_reg, Addl_Index, Pass);
+}
+
+
+static void mct_DisableDQSRcvEn_D(struct DCTStatStruc *pDCTstat)
+{
+ u8 ch_end, ch;
+ u32 reg;
+ u32 dev;
+ u32 val;
+
+ dev = pDCTstat->dev_dct;
+ if (pDCTstat->GangedMode) {
+ ch_end = 1;
+ } else {
+ ch_end = 2;
+ }
+
+ for (ch=0; ch<ch_end; ch++) {
+ reg = 0x78 + 0x100 * ch;
+ val = Get_NB32(dev, reg);
+ val &= ~(1 << DqsRcvEnTrain);
+ Set_NB32(dev, reg, val);
+ }
+}
+
+
+/* mct_ModifyIndex_D
+ * Function only used once so it was inlined.
+ */
+
+
+/* mct_GetInitFlag_D
+ * Function only used once so it was inlined.
+ */
+
+
+void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly,
+ u8 FinalValue, u8 Channel, u8 Receiver, u32 dev,
+ u32 index_reg, u8 Addl_Index, u8 Pass)
+{
+ u32 index;
+ u8 i;
+ u8 *p;
+ u32 val;
+
+ if(RcvrEnDly == 0xFE) {
+		/* set the boundary flag */
+ pDCTstat->Status |= 1 << SB_DQSRcvLimit;
+ }
+
+ /* DimmOffset not needed for CH_D_B_RCVRDLY array */
+
+
+ for(i=0; i < 8; i++) {
+ if(FinalValue) {
+ /*calculate dimm offset */
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
+ RcvrEnDly = p[i];
+ }
+
+ /* if flag=0, set DqsRcvEn value to reg. */
+ /* get the register index from table */
+ index = Table_DQSRcvEn_Offset[i >> 1];
+ index += Addl_Index; /* DIMMx DqsRcvEn byte0 */
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ if(i & 1) {
+ /* odd byte lane */
+ val &= ~(0xFF << 16);
+ val |= (RcvrEnDly << 16);
+ } else {
+ /* even byte lane */
+ val &= ~0xFF;
+ val |= RcvrEnDly;
+ }
+ Set_NB32_index_wait(dev, index_reg, index, val);
+ }
+
+}
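
mct_SetRcvrEnDly_D() stores two byte lanes per DqsRcvEn index register: the even lane's delay in bits [7:0] and the odd lane's delay in bits [23:16]. A small sketch of that packing with hypothetical delay values:

    #include <stdint.h>
    #include <stdio.h>

    /* Place one byte lane's delay into the register image shared by a lane pair. */
    static uint32_t pack_lane(uint32_t reg, unsigned lane, uint8_t dly)
    {
        if (lane & 1) {                 /* odd byte lane -> bits [23:16] */
            reg &= ~(0xFFu << 16);
            reg |= (uint32_t)dly << 16;
        } else {                        /* even byte lane -> bits [7:0] */
            reg &= ~0xFFu;
            reg |= dly;
        }
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0;
        reg = pack_lane(reg, 0, 0x23);  /* hypothetical delay, byte lane 0 */
        reg = pack_lane(reg, 1, 0x27);  /* hypothetical delay, byte lane 1 */
        printf("DqsRcvEn register value: 0x%08x\n", reg);   /* 0x00270023 */
        return 0;
    }
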
+
+static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQSRcvEnDly)
+{
+ u32 dev;
+ u32 reg;
+ u16 SubTotal;
+ u32 index_reg;
+ u32 reg_off;
+ u32 val;
+ u32 valx;
+
+ if(pDCTstat->GangedMode)
+ Channel = 0;
+
+ dev = pDCTstat->dev_dct;
+ reg_off = 0x100 * Channel;
+ index_reg = 0x98 + reg_off;
+
+ /* Multiply the CAS Latency by two to get a number of 1/2 MEMCLKs units.*/
+ val = Get_NB32(dev, 0x88 + reg_off);
+ SubTotal = ((val & 0x0f) + 1) << 1; /* SubTotal is 1/2 Memclk unit */
+
+ /* If registered DIMMs are being used then
+ * add 1 MEMCLK to the sub-total.
+ */
+ val = Get_NB32(dev, 0x90 + reg_off);
+ if(!(val & (1 << UnBuffDimm)))
+ SubTotal += 2;
+
+ /* If the address prelaunch is setup for 1/2 MEMCLKs then
+ * add 1, else add 2 to the sub-total.
+ * if (AddrCmdSetup || CsOdtSetup || CkeSetup) then K := K + 2;
+ */
+ val = Get_NB32_index_wait(dev, index_reg, 0x04);
+ if(!(val & 0x00202020))
+ SubTotal += 1;
+ else
+ SubTotal += 2;
+
+ /* If the F2x[1, 0]78[RdPtrInit] field is 4, 5, 6 or 7 MEMCLKs,
+ * then add 4, 3, 2, or 1 MEMCLKs, respectively to the sub-total. */
+ val = Get_NB32(dev, 0x78 + reg_off);
+ SubTotal += 8 - (val & 0x0f);
+
+	/* Convert bits 7-5 (also referred to as the coarse delay) of
+ * the current (or worst case) DQS receiver enable delay to
+ * 1/2 MEMCLKs units, rounding up, and add this to the sub-total.
+ */
+ SubTotal += DQSRcvEnDly >> 5; /*BOZO-no rounding up */
+
+ /* Add 5.5 to the sub-total. 5.5 represents part of the
+ * processor specific constant delay value in the DRAM
+ * clock domain.
+ */
+ SubTotal <<= 1; /*scale 1/2 MemClk to 1/4 MemClk */
+ SubTotal += 11; /*add 5.5 1/2MemClk */
+
+ /* Convert the sub-total (in 1/2 MEMCLKs) to northbridge
+ * clocks (NCLKs) as follows (assuming DDR400 and assuming
+ * that no P-state or link speed changes have occurred).
+ */
+
+ /* New formula:
+ * SubTotal *= 3*(Fn2xD4[NBFid]+4)/(3+Fn2x94[MemClkFreq])/2 */
+ val = Get_NB32(dev, 0x94 + reg_off);
+
+ /* SubTotal div 4 to scale 1/4 MemClk back to MemClk */
+ val &= 7;
+ if (val == 4) {
+ val++; /* adjust for DDR2-1066 */
+ }
+ valx = (val + 3) << 2;
+
+ val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
+ SubTotal *= ((val & 0x1f) + 4 ) * 3;
+
+ SubTotal /= valx;
+ if (SubTotal % valx) { /* round up */
+ SubTotal++;
+ }
+
+ /* Add 5 NCLKs to the sub-total. 5 represents part of the
+ * processor specific constant value in the northbridge
+ * clock domain.
+ */
+ SubTotal += 5;
+
+ pDCTstat->CH_MaxRdLat[Channel] = SubTotal;
+ if(pDCTstat->GangedMode) {
+ pDCTstat->CH_MaxRdLat[1] = SubTotal;
+ }
+
+ /* Program the F2x[1, 0]78[MaxRdLatency] register with
+ * the total delay value (in NCLKs).
+ */
+
+ reg = 0x78 + reg_off;
+ val = Get_NB32(dev, reg);
+ val &= ~(0x3ff << 22);
+ val |= (SubTotal & 0x3ff) << 22;
+
+ /* program MaxRdLatency to correspond with current delay */
+ Set_NB32(dev, reg, val);
+}
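
The MaxRdLatency computation above accumulates a sub-total in half- and then quarter-MEMCLK units before converting to NCLKs. A worked example with hypothetical settings: CAS latency 5, unbuffered DIMMs, full-MEMCLK address prelaunch, RdPtrInit = 5, a receiver-enable delay of 0x40 (coarse field = 2 half-MEMCLKs), 400 MHz MEMCLK and a 2 GHz NB clock:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned Tcl = 5, registered = 0, prelaunch_half = 0;
        unsigned RdPtrInit = 5, RcvEnDly = 0x40, MemClkFreq = 3, NbFid = 6;

        unsigned sub = Tcl << 1;            /* CAS latency in 1/2-MEMCLK units */
        sub += registered ? 2 : 0;          /* +1 MEMCLK for registered DIMMs */
        sub += prelaunch_half ? 1 : 2;      /* address/command prelaunch */
        sub += 8 - RdPtrInit;               /* read-pointer initialization */
        sub += RcvEnDly >> 5;               /* coarse RcvEn delay, 1/2-MEMCLK units */
        sub <<= 1;                          /* 1/2 -> 1/4 MEMCLK units */
        sub += 11;                          /* fixed 2.75-MEMCLK DRAM-domain delay */

        unsigned nclk_num  = (NbFid + 4) * 3;       /* NB COF in ~66 MHz units */
        unsigned memclk_x4 = (MemClkFreq + 3) << 2; /* MEMCLK in ~66 MHz units, x4 */
        unsigned nclks = (sub * nclk_num + memclk_x4 - 1) / memclk_x4; /* round up */
        nclks += 5;                         /* fixed NB-domain delay */

        printf("MaxRdLatency = %u NCLKs\n", nclks);  /* 62 for these values */
        return 0;
    }
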
+
+
+static u8 mct_SavePassRcvEnDly_D(struct DCTStatStruc *pDCTstat,
+ u8 rcvrEnDly, u8 Channel,
+ u8 receiver, u8 Pass)
+{
+ u8 i;
+ u8 mask_Saved, mask_Pass;
+ u8 *p;
+
+ /* calculate dimm offset
+ * not needed for CH_D_B_RCVRDLY array
+ */
+
+ /* cmp if there has new DqsRcvEnDly to be recorded */
+ mask_Pass = pDCTstat->DqsRcvEn_Pass;
+
+ if(Pass == SecondPass) {
+ mask_Pass = ~mask_Pass;
+ }
+
+ mask_Saved = pDCTstat->DqsRcvEn_Saved;
+ if(mask_Pass != mask_Saved) {
+
+ /* find desired stack offset according to channel/dimm/byte */
+ if(Pass == SecondPass) {
+ // FIXME: SecondPass is never used for Barcelona p = pDCTstat->CH_D_B_RCVRDLY_1[Channel][receiver>>1];
+ p = 0; // Keep the compiler happy.
+ } else {
+ mask_Saved &= mask_Pass;
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][receiver>>1];
+ }
+ for(i=0; i < 8; i++) {
+ /* cmp per byte lane */
+ if(mask_Pass & (1 << i)) {
+ if(!(mask_Saved & (1 << i))) {
+ /* save RcvEnDly to stack, according to
+ the related Dimm/byte lane */
+ p[i] = (u8)rcvrEnDly;
+ mask_Saved |= 1 << i;
+ }
+ }
+ }
+ pDCTstat->DqsRcvEn_Saved = mask_Saved;
+ }
+ return mct_SaveRcvEnDly_D_1Pass(pDCTstat, Pass);
+}
+
+
+static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 addr, u8 channel,
+ u8 pattern, u8 Pass)
+{
+ /* Compare only the first beat of data. Since target addrs are cache
+ * line aligned, the Channel parameter is used to determine which
+ * cache QW to compare.
+ */
+
+ u8 *test_buf;
+ u8 i;
+ u8 result;
+ u8 *addr_lo_buf;
+
+ SetUpperFSbase(addr); // needed?
+
+ if(Pass == FirstPass) {
+ if(pattern==1) {
+ test_buf = (u8 *)TestPattern1_D;
+ } else {
+ test_buf = (u8 *)TestPattern0_D;
+ }
+ } else { // Second Pass
+ test_buf = (u8 *)TestPattern2_D;
+ }
+
+ addr_lo_buf = (u8 *) (addr << 8);
+ result = DQS_FAIL;
+
+ if((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) {
+ addr_lo_buf += 8; /* second channel */
+ test_buf += 8;
+ }
+
+
+#if DQS_TRAIN_DEBUG > 4
+ print_debug("\t\t\t\t\t\tQW0 : test_buf = ");
+ print_debug_hex32((unsigned)test_buf);
+ print_debug(": ");
+ for (i=0; i<8; i++) {
+ print_debug_hex8(test_buf[i]); print_debug(" ");
+ }
+ print_debug("\n");
+
+ print_debug("\t\t\t\t\t\tQW0 : addr_lo_buf = ");
+ print_debug_hex32((unsigned)addr_lo_buf);
+ print_debug(": ");
+ for (i=0; i<8; i++) {
+ print_debug_hex8(addr_lo_buf[i]); print_debug(" ");
+ }
+ print_debug("\n");
+#endif
+
+ /* prevent speculative execution of following instructions */
+ _EXECFENCE;
+
+ for (i=0; i<8; i++) {
+ if(addr_lo_buf[i] == test_buf[i]) {
+ pDCTstat->DqsRcvEn_Pass |= (1<<i);
+ } else {
+ pDCTstat->DqsRcvEn_Pass &= ~(1<<i);
+ }
+ }
+
+
+ if (Pass == FirstPass) {
+ /* if first pass, at least one byte lane pass
+ * ,then DQS_PASS=1 and will set to related reg.
+ */
+ if(pDCTstat->DqsRcvEn_Pass != 0) {
+ result = DQS_PASS;
+ } else {
+ result = DQS_FAIL;
+ }
+
+ } else {
+ /* if second pass, at least one byte lane fail
+ * ,then DQS_FAIL=1 and will set to related reg.
+ */
+ if(pDCTstat->DqsRcvEn_Pass != 0xFF) {
+ result = DQS_FAIL;
+ } else {
+ result = DQS_PASS;
+ }
+ }
+
+ /* if second pass, we can't find the fail until FFh,
+ * then let it fail to save the final delay
+ */
+ if((Pass == SecondPass) && (pDCTstat->Status & (1 << SB_DQSRcvLimit))) {
+ result = DQS_FAIL;
+ pDCTstat->DqsRcvEn_Pass = 0;
+ }
+
+ /* second pass needs to be inverted
+ * FIXME? this could be inverted in the above code to start with...
+ */
+ if(Pass == SecondPass) {
+ if (result == DQS_PASS) {
+ result = DQS_FAIL;
+ } else if (result == DQS_FAIL) { /* FIXME: doesn't need to be else if */
+ result = DQS_PASS;
+ }
+ }
+
+
+ return result;
+}
+
+
+
+static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ /* Initialize the DQS Positions in preparation for
+	 * Receiver Enable Training.
+ * Write Position is 1/2 Memclock Delay
+ * Read Position is 1/2 Memclock Delay
+ */
+ u8 i;
+ for(i=0;i<2; i++){
+ InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i);
+ }
+}
+
+
+static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel)
+{
+ /* Initialize the DQS Positions in preparation for
+	 * Receiver Enable Training.
+ * Write Position is no Delay
+ * Read Position is 1/2 Memclock Delay
+ */
+
+ u8 i, j;
+ u32 dword;
+ u8 dn = 2; // TODO: Rev C could be 4
+ u32 dev = pDCTstat->dev_dct;
+ u32 index_reg = 0x98 + 0x100 * Channel;
+
+
+ // FIXME: add Cx support
+ dword = 0x00000000;
+ for(i=1; i<=3; i++) {
+ for(j=0; j<dn; j++)
+ /* DIMM0 Write Data Timing Low */
+ /* DIMM0 Write ECC Timing */
+ Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword);
+ }
+
+ /* errata #180 */
+ dword = 0x2f2f2f2f;
+ for(i=5; i<=6; i++) {
+ for(j=0; j<dn; j++)
+ /* DIMM0 Read DQS Timing Control Low */
+ Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword);
+ }
+
+ dword = 0x0000002f;
+ for(j=0; j<dn; j++)
+ /* DIMM0 Read DQS ECC Timing Control */
+ Set_NB32_index_wait(dev, index_reg, 7 + 0x100 * j, dword);
+}
+
+
+void SetEccDQSRcvrEn_D(struct DCTStatStruc *pDCTstat, u8 Channel)
+{
+ u32 dev;
+ u32 index_reg;
+ u32 index;
+ u8 ChipSel;
+ u8 *p;
+ u32 val;
+
+ dev = pDCTstat->dev_dct;
+ index_reg = 0x98 + Channel * 0x100;
+ index = 0x12;
+ p = pDCTstat->CH_D_BC_RCVRDLY[Channel];
+ print_debug_dqs("\t\tSetEccDQSRcvrPos: Channel ", Channel, 2);
+ for(ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) {
+ val = p[ChipSel>>1];
+ Set_NB32_index_wait(dev, index_reg, index, val);
+ print_debug_dqs_pair("\t\tSetEccDQSRcvrPos: ChipSel ",
+ ChipSel, " rcvr_delay ", val, 2);
+ index += 3;
+ }
+}
+
+
+static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel)
+{
+ u8 ChipSel;
+ u16 EccDQSLike;
+ u8 EccDQSScale;
+ u32 val, val0, val1;
+
+ EccDQSLike = pDCTstat->CH_EccDQSLike[Channel];
+ EccDQSScale = pDCTstat->CH_EccDQSScale[Channel];
+
+ for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) {
+ if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, ChipSel)) {
+ u8 *p;
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][ChipSel>>1];
+
+ /* DQS Delay Value of Data Bytelane
+ * most like ECC byte lane */
+ val0 = p[EccDQSLike & 0x07];
+ /* DQS Delay Value of Data Bytelane
+ * 2nd most like ECC byte lane */
+ val1 = p[(EccDQSLike>>8) & 0x07];
+
+ if(val0 > val1) {
+ val = val0 - val1;
+ } else {
+ val = val1 - val0;
+ }
+
+ val *= ~EccDQSScale;
+ val >>= 8; // /256
+
+ if(val0 > val1) {
+ val -= val1;
+ } else {
+ val += val0;
+ }
+
+ pDCTstat->CH_D_BC_RCVRDLY[Channel][ChipSel>>1] = val;
+ }
+ }
+ SetEccDQSRcvrEn_D(pDCTstat, Channel);
+}
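
CalcEccDQSRcvrEn_D() estimates the ECC byte lane's receiver-enable delay from the two data byte lanes recorded as most similar to it, weighted by EccDQSScale. The sketch below shows the intended interpolation in a cleaned-up form, treating EccDQSScale as a 0..255 fraction; it illustrates the idea rather than reproducing the exact bit-level arithmetic above, and the lane delays are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Estimate the ECC lane delay: scale/256 of the way from the most-similar
     * data lane toward the second most-similar one. */
    static uint8_t interpolate_ecc_dly(uint8_t most_like, uint8_t second, uint8_t scale)
    {
        return most_like + (second - most_like) * scale / 256;
    }

    int main(void)
    {
        uint8_t dly_lane3 = 0x28;   /* most like the ECC lane */
        uint8_t dly_lane4 = 0x30;   /* second most like */
        uint8_t scale = 0x40;       /* 64/256 = 25% toward the second lane */

        printf("estimated ECC lane delay: 0x%02x\n",
               interpolate_ecc_dly(dly_lane3, dly_lane4, scale));  /* 0x2a */
        return 0;
    }
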
+
+void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+ u8 i;
+
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+ if (!pDCTstat->NodePresent)
+ break;
+ if (pDCTstat->DCTSysLimit) {
+ for(i=0; i<2; i++)
+ CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i);
+ }
+ }
+}
+
+
+void phyAssistedMemFnceTraining(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+
+ u8 Node = 0;
+ struct DCTStatStruc *pDCTstat;
+
+ // FIXME: skip for Ax
+ while (Node < MAX_NODES_SUPPORTED) {
+ pDCTstat = pDCTstatA + Node;
+
+ if(pDCTstat->DCTSysLimit) {
+ fenceDynTraining_D(pMCTstat, pDCTstat, 0);
+ fenceDynTraining_D(pMCTstat, pDCTstat, 1);
+ }
+ Node++;
+ }
+}
+
+
+static void fenceDynTraining_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 dct)
+{
+ u16 avRecValue;
+ u32 val;
+ u32 dev;
+ u32 index_reg = 0x98 + 0x100 * dct;
+ u32 index;
+
+ /* BIOS first programs a seed value to the phase recovery engine
+ * (recommended 19) registers.
+ * Dram Phase Recovery Control Register (F2x[1,0]9C_x[51:50] and
+ * F2x[1,0]9C_x52.) .
+ */
+
+ dev = pDCTstat->dev_dct;
+ for (index = 0x50; index <= 0x52; index ++) {
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ val |= (FenceTrnFinDlySeed & 0x1F);
+ if (index != 0x52) {
+ val &= ~(0xFF << 8);
+ val |= (val & 0xFF) << 8;
+ val &= 0xFFFF;
+ val |= val << 16;
+ }
+ Set_NB32_index_wait(dev, index_reg, index, val);
+ }
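+	/* Illustration: assuming the registers read back as 0 and
+	 * FenceTrnFinDlySeed is the recommended 19 (0x13), the loop above
+	 * writes 0x13131313 to x50/x51 (seed replicated into all four bytes)
+	 * and 0x00000013 to x52. */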
+
+
+ /* Set F2x[1,0]9C_x08[PhyFenceTrEn]=1. */
+ val = Get_NB32_index_wait(dev, index_reg, 0x08);
+ val |= 1 << PhyFenceTrEn;
+ Set_NB32_index_wait(dev, index_reg, 0x08, val);
+
+ /* Wait 200 MEMCLKs. */
+ mct_Wait_10ns (20000); /* wait 200us */
+
+ /* Clear F2x[1,0]9C_x08[PhyFenceTrEn]=0. */
+ val = Get_NB32_index_wait(dev, index_reg, 0x08);
+ val &= ~(1 << PhyFenceTrEn);
+ Set_NB32_index_wait(dev, index_reg, 0x08, val);
+
+ /* BIOS reads the phase recovery engine registers
+ * F2x[1,0]9C_x[51:50] and F2x[1,0]9C_x52. */
+ avRecValue = 0;
+ for (index = 0x50; index <= 0x52; index ++) {
+ val = Get_NB32_index_wait(dev, index_reg, index);
+ avRecValue += val & 0x7F;
+ if (index != 0x52) {
+ avRecValue += (val >> 8) & 0x7F;
+ avRecValue += (val >> 16) & 0x7F;
+ avRecValue += (val >> 24) & 0x7F;
+ }
+ }
+
+ val = avRecValue / 9;
+ if (avRecValue % 9)
+ val++;
+ avRecValue = val;
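+	/* The loop above summed nine 7-bit phase-recovery values (four each
+	 * from x50 and x51 plus one from x52). e.g., an illustrative sum of
+	 * 100 gives avRecValue = 12 after rounding up, and 12 - 8 = 4 is
+	 * written to PhyFence below. */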
+
+ /* Write the (averaged value -8) to F2x[1,0]9C_x0C[PhyFence]. */
+ avRecValue -= 8;
+ val = Get_NB32_index_wait(dev, index_reg, 0x0C);
+ val &= ~(0x1F << 16);
+ val |= (avRecValue & 0x1F) << 16;
+ Set_NB32_index_wait(dev, index_reg, 0x0C, val);
+
+ /* Rewrite F2x[1,0]9C_x04-DRAM Address/Command Timing Control Register
+ * delays (both channels). */
+ val = Get_NB32_index_wait(dev, index_reg, 0x04);
+ Set_NB32_index_wait(dev, index_reg, 0x04, val);
+}
+
+
+static void mct_Wait_10ns (u32 cycles)
+{
+ u32 saved, i;
+ u32 hi, lo, msr;
+
+ /* cycles = number of 10ns cycles(or longer) to delay */
+ /* FIXME: Need to calibrate to CPU/NCLK speed? */
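+	/* e.g., the mct_Wait_10ns(20000) call above spins for at least
+	 * 20000 * 8 TSC ticks, roughly 200us if the TSC advances once per
+	 * 1.25ns NCLK as assumed below */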
+
+ msr = 0x10; /* TSC */
+ for (i = 0; i < cycles; i++) {
+ _RDMSR(msr, &lo, &hi);
+ saved = lo;
+
+ do {
+ _RDMSR(msr, &lo, &hi);
+ } while (lo - saved < 8); /* 8 x 1.25 ns as NCLK is at 1.25ns */
+ }
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc1p.c b/src/northbridge/amd/amdmct/mct/mctsrc1p.c
new file mode 100644
index 0000000000..31d2af8955
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctsrc1p.c
@@ -0,0 +1,96 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+u8 mct_checkNumberOfDqsRcvEn_1Pass(u8 pass)
+{
+ u8 ret = 1;
+ if (pass == SecondPass)
+ ret = 0;
+
+ return ret;
+}
+
+
+u32 SetupDqsPattern_1PassA(u8 pass)
+{
+ return (u32) TestPattern1_D;
+}
+
+
+u32 SetupDqsPattern_1PassB(u8 pass)
+{
+ return (u32) TestPattern0_D;
+}
+
+u8 mct_Get_Start_RcvrEnDly_1Pass(u8 pass)
+{
+ return 0;
+}
+
+u8 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel, u8 Receiver,
+ u8 Pass)
+{
+ u8 i, MaxValue;
+ u8 *p;
+ u8 val;
+
+ MaxValue = 0;
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
+
+ for(i=0; i < 8; i++) {
+ /* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/
+ val = p[i];
+ /* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/
+ val += Pass1MemClkDly;
+ /* write back the value to stack */
+ if (val > MaxValue)
+ MaxValue = val;
+
+ p[i] = val;
+ }
+// pDCTstat->DimmTrainFail &= ~(1<<Receiver+Channel);
+
+ return MaxValue;
+}
+
+
+
+u8 mct_AdjustFinalDQSRcvValue_1Pass(u8 val_1p, u8 val_2p)
+{
+ return (val_1p & 0xff) + ((val_2p & 0xff)<<8);
+}
+
+
+u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass)
+{
+ u8 ret;
+ ret = 0;
+ if((pDCTstat->DqsRcvEn_Pass == 0xff) && (pass== FirstPass))
+ ret = 2;
+ return ret;
+}
+
+u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
+ u8 RcvrEnDly, u8 RcvrEnDlyLimit,
+ u8 Channel, u8 Receiver, u8 Pass)
+{
+ return mct_Average_RcvrEnDly_1Pass(pDCTstat, Channel, Receiver, Pass);
+}
diff --git a/src/northbridge/amd/amdmct/mct/mctsrc2p.c b/src/northbridge/amd/amdmct/mct/mctsrc2p.c
new file mode 100644
index 0000000000..5912513053
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mctsrc2p.c
@@ -0,0 +1,139 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+u8 mct_checkNumberOfDqsRcvEn_Pass(u8 pass)
+{
+ return 1;
+}
+
+
+u32 SetupDqsPattern_PassA(u8 Pass)
+{
+ u32 ret;
+ if(Pass == FirstPass)
+ ret = (u32) TestPattern1_D;
+ else
+ ret = (u32) TestPattern2_D;
+
+ return ret;
+}
+
+
+u32 SetupDqsPattern_PassB(u8 Pass)
+{
+ u32 ret;
+ if(Pass == FirstPass)
+ ret = (u32) TestPattern0_D;
+ else
+ ret = (u32) TestPattern2_D;
+
+ return ret;
+}
+
+
+u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 Receiver,
+ u8 Pass)
+{
+ u8 RcvrEnDly;
+
+ if (Pass == FirstPass)
+ RcvrEnDly = 0;
+ else {
+ u8 max = 0;
+ u8 val;
+ u8 i;
+ u8 *p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver>>1];
+ u8 bn;
+ bn = 8;
+// print_tx("mct_Get_Start_RcvrEnDly_Pass: Channel:", Channel);
+// print_tx("mct_Get_Start_RcvrEnDly_Pass: Receiver:", Receiver);
+		for (i = 0; i < bn; i++) {
+ val = p[i];
+// print_tx("mct_Get_Start_RcvrEnDly_Pass: i:", i);
+// print_tx("mct_Get_Start_RcvrEnDly_Pass: val:", val);
+ if(val > max) {
+ max = val;
+ }
+ }
+ RcvrEnDly = max;
+// while(1) {; }
+// RcvrEnDly += secPassOffset; //FIXME Why
+ }
+
+ return RcvrEnDly;
+}
+
+
+
+u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
+ u8 RcvrEnDly, u8 RcvrEnDlyLimit,
+ u8 Channel, u8 Receiver, u8 Pass)
+{
+ u8 i;
+ u8 *p;
+ u8 *p_1;
+ u8 val;
+ u8 val_1;
+ u8 valid = 1;
+ u8 bn;
+
+ bn = 8;
+
+ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver>>1];
+
+ if (Pass == SecondPass) { /* second pass must average values */
+ //FIXME: which byte?
+ p_1 = pDCTstat->B_RCVRDLY_1;
+// p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1];
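+		/* Illustration (hypothetical values): if p[i] = 0x30,
+		 * Pass1MemClkDly = 0x20 and the saved first-pass value
+		 * p_1[i] = 0x14, the loop below stores
+		 * ((0x30 - 0x20) + 0x14) / 2 = 0x12. */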
+ for(i=0; i<bn; i++) {
+ val = p[i];
+ /* left edge */
+ if (val != (RcvrEnDlyLimit - 1)) {
+ val -= Pass1MemClkDly;
+ val_1 = p_1[i];
+ val += val_1;
+ val >>= 1;
+ p[i] = val;
+ } else {
+ valid = 0;
+ break;
+ }
+ }
+ if (!valid) {
+ pDCTstat->ErrStatus |= 1<<SB_NORCVREN;
+ } else {
+ pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel));
+ }
+ } else {
+ for(i=0; i < bn; i++) {
+ val = p[i];
+			/* Add 1/2 Memclk delay */
+ //val += Pass1MemClkDly;
+ val += 0x5; // NOTE: middle value with DQSRCVEN_SAVED_GOOD_TIMES
+ //val += 0x02;
+ p[i] = val;
+ pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel));
+ }
+ }
+
+ return RcvrEnDly;
+}
diff --git a/src/northbridge/amd/amdmct/mct/mcttmrl.c b/src/northbridge/amd/amdmct/mct/mcttmrl.c
new file mode 100644
index 0000000000..f7763d13b8
--- /dev/null
+++ b/src/northbridge/amd/amdmct/mct/mcttmrl.c
@@ -0,0 +1,413 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*
+ * Description: Max Read Latency Training feature for DDR2 MCT
+ */
+
+static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr);
+static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel,
+ u8 *MaxRcvrEnDly, u8 *valid);
+u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat, u8 Channel,
+ u8 DQSRcvEnDly, u32 *Margin);
+static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat);
+static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat, u8 Channel,
+ u16 MaxRdLatVal);
+
+/*Warning: These must be located so they do not cross a logical 16-bit
+ segment boundary!*/
+static const u32 TestMaxRdLAtPattern_D[] = {
+ 0x6E0E3FAC, 0x0C3CFF52,
+ 0x4A688181, 0x49C5B613,
+ 0x7C780BA6, 0x5C1650E3,
+ 0x0C4F9D76, 0x0C6753E6,
+ 0x205535A5, 0xBABFB6CA,
+ 0x610E6E5F, 0x0C5F1C87,
+ 0x488493CE, 0x14C9C383,
+ 0xF5B9A5CD, 0x9CE8F615,
+
+ 0xAAD714B5, 0xC38F1B4C,
+ 0x72ED647C, 0x669F7562,
+ 0x5233F802, 0x4A898B30,
+ 0x10A40617, 0x3326B465,
+ 0x55386E04, 0xC807E3D3,
+ 0xAB49E193, 0x14B4E63A,
+ 0x67DF2495, 0xEA517C45,
+ 0x7624CE51, 0xF8140C51,
+
+ 0x4824BD23, 0xB61DD0C9,
+ 0x072BCFBE, 0xE8F3807D,
+ 0x919EA373, 0x25E30C47,
+ 0xFEB12958, 0x4DA80A5A,
+ 0xE9A0DDF8, 0x792B0076,
+ 0xE81C73DC, 0xF025B496,
+ 0x1DB7E627, 0x808594FE,
+ 0x82668268, 0x655C7783,
+};
+
+
+static u32 SetupMaxRdPattern(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u32 *buffer)
+{
+ /* 1. Copy the alpha and Beta patterns from ROM to Cache,
+ * aligning on 16 byte boundary
+ * 2. Set the ptr to Cacheable copy in DCTStatstruc.PtrPatternBufA
+ * for Alpha
+ * 3. Set the ptr to Cacheable copy in DCTStatstruc.PtrPatternBufB
+ * for Beta
+ */
+
+ u32 *buf;
+ u8 i;
+
+ buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
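+	/* e.g., a buffer at 0x00123457 (illustrative address) yields
+	 * buf = 0x00123460, i.e. rounded up to a 16-byte boundary */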
+
+ for(i = 0; i < (16 * 3); i++) {
+ buf[i] = TestMaxRdLAtPattern_D[i];
+ }
+
+ return (u32)buf;
+
+}
+
+
+void TrainMaxReadLatency_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstatA)
+{
+ u8 Node;
+
+ for(Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if(!pDCTstat->NodePresent)
+ break;
+
+ if(pDCTstat->DCTSysLimit)
+ maxRdLatencyTrain_D(pMCTstat, pDCTstat);
+ }
+}
+
+
+static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat)
+{
+ u8 Channel;
+ u32 TestAddr0;
+ u8 _DisableDramECC = 0, _Wrap32Dis = 0, _SSE2 = 0;
+ u16 MaxRdLatDly;
+ u8 RcvrEnDly = 0;
+ u32 PatternBuffer[60]; // FIXME: why not 48 + 4
+ u32 Margin;
+ u32 addr;
+ u32 cr4;
+ u32 lo, hi;
+
+ u8 valid;
+ u32 pattern_buf;
+
+ cr4 = read_cr4();
+ if(cr4 & (1<<9)) { /* save the old value */
+ _SSE2 = 1;
+ }
+ cr4 |= (1<<9); /* OSFXSR enable SSE2 */
+ write_cr4(cr4);
+
+ addr = HWCR;
+ _RDMSR(addr, &lo, &hi);
+ if(lo & (1<<17)) { /* save the old value */
+ _Wrap32Dis = 1;
+ }
+ lo |= (1<<17); /* HWCR.wrap32dis */
+ lo &= ~(1<<15); /* SSEDIS */
+ /* Setting wrap32dis allows 64-bit memory references in
+ real mode */
+ _WRMSR(addr, lo, hi);
+
+ _DisableDramECC = mct_DisableDimmEccEn_D(pMCTstat, pDCTstat);
+
+ pattern_buf = SetupMaxRdPattern(pMCTstat, pDCTstat, PatternBuffer);
+
+ for (Channel = 0; Channel < 2; Channel++) {
+ print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1);
+ pDCTstat->Channel = Channel;
+
+ if( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
+ break; /*if ganged mode, skip DCT 1 */
+
+ TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly, &valid);
+ if(!valid) /* Address not supported on current CS */
+ continue;
+ /* rank 1 of DIMM, testpattern 0 */
+ WriteMaxRdLat1CLTestPattern_D(pattern_buf, TestAddr0);
+
+ MaxRdLatDly = mct_GetStartMaxRdLat_D(pMCTstat, pDCTstat, Channel, RcvrEnDly, &Margin);
+ print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly start ", MaxRdLatDly, 2);
+ print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly Margin ", Margin, 2);
+ while(MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */
+ mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly);
+ ReadMaxRdLat1CLTestPattern_D(TestAddr0);
+ if( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
+ break;
+ SetTargetWTIO_D(TestAddr0);
+ FlushMaxRdLatTestPattern_D(TestAddr0);
+ ResetTargetWTIO_D();
+ MaxRdLatDly++;
+ }
+ print_debug_dqs("\tMaxRdLatencyTrain53: MaxRdLatDly end ", MaxRdLatDly, 2);
+ mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly + Margin);
+ }
+
+ if(_DisableDramECC) {
+ mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC);
+ }
+
+ if(!_Wrap32Dis) {
+ addr = HWCR;
+ _RDMSR(addr, &lo, &hi);
+ lo &= ~(1<<17); /* restore HWCR.wrap32dis */
+ _WRMSR(addr, lo, hi);
+ }
+ if(!_SSE2){
+ cr4 = read_cr4();
+ cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
+ write_cr4(cr4);
+ }
+
+#if DQS_TRAIN_DEBUG > 0
+ {
+ u8 Channel;
+ print_debug("maxRdLatencyTrain: CH_MaxRdLat:\n");
+ for(Channel = 0; Channel<2; Channel++) {
+ print_debug("Channel:"); print_debug_hex8(Channel);
+ print_debug(": ");
+ print_debug_hex8( pDCTstat->CH_MaxRdLat[Channel] );
+ print_debug("\n");
+ }
+ }
+#endif
+
+}
+
+static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
+ u8 Channel, u16 MaxRdLatVal)
+{
+ u8 i;
+ u32 reg;
+ u32 dev;
+ u32 val;
+
+ if (pDCTstat->GangedMode) {
+ Channel = 0; // for safe
+ for (i=0; i<2; i++)
+ pDCTstat->CH_MaxRdLat[i] = MaxRdLatVal;
+ } else {
+ pDCTstat->CH_MaxRdLat[Channel] = MaxRdLatVal;
+ }
+
+ dev = pDCTstat->dev_dct;
+ reg = 0x78 + Channel * 0x100;
+ val = Get_NB32(dev, reg);
+ val &= ~(0x3ff<<22);
+ val |= MaxRdLatVal<<22;
+ /* program MaxRdLatency to correspond with current delay */
+ Set_NB32(dev, reg, val);
+
+}
+
+
+static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr)
+{
+	/* Compare the three cache lines (16*3 dwords) of data read back from
+	 * the cache-line-aligned target address against the cacheable copy of
+	 * the MaxRdLat test pattern.
+	 */
+
+ u32 *test_buf = (u32 *)pattern_buf;
+ u32 addr_lo;
+ u32 val, val_test;
+ int i;
+ u8 ret = DQS_PASS;
+
+ SetUpperFSbase(addr);
+ addr_lo = addr<<8;
+
+ _EXECFENCE;
+ for (i=0; i<(16*3); i++) {
+ val = read32_fs(addr_lo);
+ val_test = test_buf[i];
+
+ print_debug_dqs_pair("\t\t\t\t\t\ttest_buf = ", (u32)test_buf, " value = ", val_test, 5);
+ print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ", addr_lo, " value = ", val, 5);
+ if(val != val_test) {
+ ret = DQS_FAIL;
+ break;
+ }
+ addr_lo += 4;
+ }
+
+ return ret;
+}
+
+static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 *MaxRcvrEnDly,
+ u8 *valid)
+{
+ u8 Max = 0;
+
+ u8 Channel_Max = 0;
+ u8 d;
+ u8 d_Max = 0;
+
+ u8 Byte;
+ u32 TestAddr0 = 0;
+ u8 ch, ch_start, ch_end;
+ u8 bn;
+
+ bn = 8;
+
+ if(pDCTstat->Status & (1 << SB_128bitmode)) {
+ ch_start = 0;
+ ch_end = 2;
+ } else {
+ ch_start = Channel;
+ ch_end = Channel + 1;
+ }
+
+ *valid = 0;
+
+ for(ch = ch_start; ch < ch_end; ch++) {
+ for(d=0; d<4; d++) {
+ for(Byte = 0; Byte<bn; Byte++) {
+ u8 tmp;
+ tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte];
+ if(tmp>Max) {
+ Max = tmp;
+ Channel_Max = Channel;
+ d_Max = d;
+ }
+ }
+ }
+ }
+
+ if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1)) {
+ TestAddr0 = mct_GetMCTSysAddr_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1, valid);
+ }
+
+ if(*valid)
+ *MaxRcvrEnDly = Max;
+
+ return TestAddr0;
+
+}
+
+u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
+ struct DCTStatStruc *pDCTstat,
+ u8 Channel, u8 DQSRcvEnDly, u32 *Margin)
+{
+ u32 SubTotal;
+ u32 val;
+ u32 valx;
+ u32 valxx;
+ u32 index_reg;
+ u32 reg_off;
+ u32 dev;
+
+ if(pDCTstat->GangedMode)
+ Channel = 0;
+
+ index_reg = 0x98 + 0x100 * Channel;
+
+ reg_off = 0x100 * Channel;
+ dev = pDCTstat->dev_dct;
+
+	/* Multiply the CAS Latency by two to get a count in 1/2 MEMCLK units. */
+ val = Get_NB32(dev, 0x88 + reg_off);
+ SubTotal = ((val & 0x0f) + 1) << 1; /* SubTotal is 1/2 Memclk unit */
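+	/* e.g., an illustrative Tcl field value of 4 gives SubTotal = (4+1)*2
+	 * = 10 half-MEMCLK units */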
+
+ /* If registered DIMMs are being used then add 1 MEMCLK to the sub-total*/
+ val = Get_NB32(dev, 0x90 + reg_off);
+ if(!(val & (1 << UnBuffDimm)))
+ SubTotal += 2;
+
+ /*If the address prelaunch is setup for 1/2 MEMCLKs then add 1,
+ * else add 2 to the sub-total. if (AddrCmdSetup || CsOdtSetup
+ * || CkeSetup) then K := K + 2; */
+ val = Get_NB32_index_wait(dev, index_reg, 0x04);
+ if(!(val & 0x00202020))
+ SubTotal += 1;
+ else
+ SubTotal += 2;
+
+ /* If the F2x[1, 0]78[RdPtrInit] field is 4, 5, 6 or 7 MEMCLKs,
+ * then add 4, 3, 2, or 1 MEMCLKs, respectively to the sub-total. */
+ val = Get_NB32(dev, 0x78 + reg_off);
+ SubTotal += 8 - (val & 0x0f);
+
+	/* Convert bits 7-5 (also referred to as the coarse delay) of the current
+ * (or worst case) DQS receiver enable delay to 1/2 MEMCLKs units,
+ * rounding up, and add this to the sub-total. */
+ SubTotal += DQSRcvEnDly >> 5; /*BOZO-no rounding up */
+
+ SubTotal <<= 1; /*scale 1/2 MemClk to 1/4 MemClk */
+
+ /* Convert the sub-total (in 1/2 MEMCLKs) to northbridge clocks (NCLKs)
+ * as follows (assuming DDR400 and assuming that no P-state or link speed
+ * changes have occurred). */
+
+	/* New formula:
+	 * SubTotal *= 3*(F3xD4[NBFid]+4)/(3+F2x94[MemClkFreq])/2 */
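+	/* Illustration (hypothetical values): with NBFid = 6 and MemClkFreq = 3
+	 * (DDR2-800), the formula gives 3*(6+4)/(3+3)/2 = 2.5 NCLKs per
+	 * 1/2 MEMCLK, so a SubTotal of 20 half-MEMCLKs converts to 50 NCLKs. */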
+ val = Get_NB32(dev, 0x94 + reg_off);
+ val &= 7;
+ if (val == 4) {
+ val++; /* adjust for DDR2-1066 */
+ }
+ valx = (val + 3) << 2; /* SubTotal div 4 to scale 1/4 MemClk back to MemClk */
+
+
+ val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
+ val = ((val & 0x1f) + 4 ) * 3;
+
+ /* Calculate 1 MemClk + 1 NCLK delay in NCLKs for margin */
+	valxx = val << 2;
+	valxx /= valx;
+	if ((val << 2) % valx)
+		valxx++;	/* round up */
+ valxx++; /* add 1NCLK */
+ *Margin = valxx; /* one MemClk delay in NCLKs and one additional NCLK */
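+	/* Illustration (hypothetical values): with val = 30 (NBFid = 6) and
+	 * valx = 24 (MemClkFreq = 3), one MEMCLK is 120/24 = 5 NCLKs, so
+	 * Margin = 5 + 1 = 6 */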
+
+	SubTotal *= val;
+
+	val = SubTotal / valx;
+	if (SubTotal % valx)
+		val++;		/* round up */
+
+
+
+ return val;
+}
+
+
diff --git a/src/northbridge/amd/amdmct/wrappers/mcti.h b/src/northbridge/amd/amdmct/wrappers/mcti.h
new file mode 100644
index 0000000000..455b7b7c30
--- /dev/null
+++ b/src/northbridge/amd/amdmct/wrappers/mcti.h
@@ -0,0 +1,59 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#define SERVER 0
+#define DESKTOP 1
+//#define MOBILE 2
+#define REV_F 0
+#define REV_DR 1
+#define REV_FDR 2
+
+
+/*----------------------------------------------------------------------------
+COMMENT OUT ALL BUT 1
+----------------------------------------------------------------------------*/
+//#define BUILD_VERSION REV_F /*BIOS supports rev F only*/
+//#define BUILD_VERSION REV_DR /*BIOS supports rev 10 only*/
+//#define BUILD_VERSION REV_FDR /*BIOS supports both rev F and 10*/
+
+/*----------------------------------------------------------------------------
+COMMENT OUT ALL BUT 1
+----------------------------------------------------------------------------*/
+#ifndef SYSTEM_TYPE
+#define SYSTEM_TYPE SERVER
+//#define SYSTEM_TYPE DESKTOP
+//#define SYSTEM_TYPE MOBILE
+#endif
+
+/*----------------------------------------------------------------------------
+COMMENT OUT ALL BUT 1
+----------------------------------------------------------------------------*/
+#define UMA_SUPPORT 0 /*Not supported */
+//#define UMA_SUPPORT 1 /*Supported */
+
+/*----------------------------------------------------------------------------
+UPDATE AS NEEDED
+----------------------------------------------------------------------------*/
+#define MAX_NODES_SUPPORTED 8
+#define MAX_DIMMS_SUPPORTED 8
+#define MAX_CS_SUPPORTED 8
+#define MCT_TRNG_KEEPOUT_START 0x00000C00
+#define MCT_TRNG_KEEPOUT_END 0x00000CFF
+
diff --git a/src/northbridge/amd/amdmct/wrappers/mcti_d.c b/src/northbridge/amd/amdmct/wrappers/mcti_d.c
new file mode 100644
index 0000000000..41afed52ff
--- /dev/null
+++ b/src/northbridge/amd/amdmct/wrappers/mcti_d.c
@@ -0,0 +1,338 @@
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Call-backs */
+
+u16 mctGet_NVbits(u8 index)
+{
+ u16 val = 0;
+
+ switch (index) {
+ case NV_PACK_TYPE:
+#if SYSTEM_TYPE == SERVER
+ val = 0;
+#elif SYSTEM_TYPE == DESKTOP
+ val = 1;
+//#elif SYSTEM_TYPE == MOBILE
+// val = 2;
+#endif
+ break;
+ case NV_MAX_NODES:
+ val = MAX_NODES_SUPPORTED;
+ break;
+ case NV_MAX_DIMMS:
+ //val = MAX_DIMMS_SUPPORTED;
+ val = 8;
+ break;
+ case NV_MAX_MEMCLK:
+ /* Maximum platform supported memclk */
+ //val = 200; /* 200MHz(DDR400) */
+ //val = 266; /* 266MHz(DDR533) */
+ //val = 333; /* 333MHz(DDR667) */
+ val = 400; /* 400MHz(DDR800) */
+ break;
+ case NV_ECC_CAP:
+#if SYSTEM_TYPE == SERVER
+ val = 1; /* memory bus ECC capable */
+#else
+ val = 0; /* memory bus ECC not capable */
+#endif
+ break;
+ case NV_4RANKType:
+ /* Quad Rank DIMM slot type */
+ val = 0; /* normal */
+ //val = 1; /* R4 (registered DIMMs in AMD server configuration) */
+ //val = 2; /* S4 (Unbuffered SO-DIMMS) */
+ break;
+ case NV_BYPMAX:
+#if (UMA_SUPPORT == 0)
+ val = 4;
+#elif (UMA_SUPPORT == 1)
+ val = 7;
+#endif
+ break;
+ case NV_RDWRQBYP:
+#if (UMA_SUPPORT == 0)
+ val = 2;
+#elif (UMA_SUPPORT == 1)
+ val = 3;
+#endif
+ break;
+ case NV_MCTUSRTMGMODE:
+ val = 0; /* Automatic (recommended) */
+ //val = 1; /* Limited */
+ //val = 2; /* Manual */
+ break;
+ case NV_MemCkVal:
+ //val = 0; /* 200MHz */
+ //val = 1; /* 266MHz */
+ val = 2; /* 333MHz */
+ break;
+ case NV_BankIntlv:
+ /* Bank (chip select) interleaving */
+ //val = 0; /* disabled */
+ val = 1; /* enabled (recommended) */
+ break;
+ case NV_MemHole:
+ //val = 0; /* Disabled */
+ val = 1; /* Enabled (recommended) */
+ break;
+ case NV_AllMemClks:
+ val = 0; /* Normal (only to slots that have enabled DIMMs) */
+ //val = 1; /* Enable all memclocks */
+ break;
+ case NV_SPDCHK_RESTRT:
+ val = 0; /* Exit current node initialization if any DIMM has SPD checksum error */
+		//val = 1; /* Ignore faulty SPD checksum (DIMM will still be disabled), continue current node initialization */
+ break;
+ case NV_DQSTrainCTL:
+ //val = 0; /*Skip dqs training */
+ val = 1; /* Perform dqs training */
+ break;
+ case NV_NodeIntlv:
+ val = 0; /* Disabled (recommended) */
+ //val = 1; /* Enable */
+ break;
+ case NV_BurstLen32:
+#if (UMA_SUPPORT == 0)
+ val = 0; /* 64 byte mode */
+#elif (UMA_SUPPORT == 1)
+ val = 1; /* 32 byte mode */
+#endif
+ break;
+ case NV_CKE_PDEN:
+ //val = 0; /* Disable */
+ val = 1; /* Enable */
+ break;
+ case NV_CKE_CTL:
+ val = 0; /* per channel control */
+ //val = 1; /* per chip select control */
+ break;
+ case NV_CLKHZAltVidC3:
+ val = 0; /* disable */
+ //val = 1; /* enable */
+ break;
+ case NV_BottomIO:
+ val = 0xC0; /* address bits [31:24] */
+ break;
+ case NV_BottomUMA:
+#if (UMA_SUPPORT == 0)
+ val = 0xC0; /* address bits [31:24] */
+#elif (UMA_SUPPORT == 1)
+ val = 0xB0; /* address bits [31:24] */
+#endif
+ break;
+ case NV_ECC:
+#if (SYSTEM_TYPE == SERVER)
+ val = 1; /* Enable */
+#else
+ val = 0; /* Disable */
+#endif
+ break;
+ case NV_NBECC:
+#if (SYSTEM_TYPE == SERVER)
+ val = 1; /* Enable */
+#else
+ val = 0; /* Disable */
+#endif
+ break;
+ case NV_ChipKill:
+#if (SYSTEM_TYPE == SERVER)
+ val = 1; /* Enable */
+#else
+ val = 0; /* Disable */
+#endif
+ break;
+ case NV_ECCRedir:
+ val = 0; /* Disable */
+ //val = 1; /* Enable */
+ break;
+ case NV_DramBKScrub:
+ val = 0x00; /* Disabled */
+ //val = 0x01; /* 40ns */
+ //val = 0x02; /* 80ns */
+ //val = 0x03; /* 160ns */
+ //val = 0x04; /* 320ns */
+ //val = 0x05; /* 640ns */
+ //val = 0x06; /* 1.28us */
+ //val = 0x07; /* 2.56us */
+ //val = 0x08; /* 5.12us */
+ //val = 0x09; /* 10.2us */
+ //val = 0x0a; /* 20.5us */
+ //val = 0x0b; /* 41us */
+ //val = 0x0c; /* 81.9us */
+ //val = 0x0d; /* 163.8us */
+ //val = 0x0e; /* 327.7us */
+ //val = 0x0f; /* 655.4us */
+ //val = 0x10; /* 1.31ms */
+ //val = 0x11; /* 2.62ms */
+ //val = 0x12; /* 5.24ms */
+ //val = 0x13; /* 10.49ms */
+		//val = 0x14;	/* 20.97ms */
+ //val = 0x15; /* 42ms */
+ //val = 0x16; /* 84ms */
+ break;
+ case NV_L2BKScrub:
+ val = 0; /* Disabled - See L2Scrub in BKDG */
+ break;
+ case NV_DCBKScrub:
+ val = 0; /* Disabled - See DcacheScrub in BKDG */
+ break;
+	case NV_CS_SpareCTL:
+		val = 0;	/* Disabled */
+		//val = 1;	/* Enabled */
+		break;
+	case NV_SyncOnUnEccEn:
+		val = 0;	/* Disabled */
+		//val = 1;	/* Enabled */
+		break;
+	case NV_Unganged:
+		/* channel interleaving gives better performance than ganged mode at this time */
+		val = 1;	/* Enabled */
+		//val = 0;	/* Disabled */
+		break;
+ case NV_ChannelIntlv:
+		val = 5;	/* Enabled: hash, XOR of address bits [20:16, 6] */ /* Not currently checked in mctchi_d.c */
+ /* Bit 0 = 0 - Disable
+ * 1 - Enable
+ * Bits[2:1] = 00b - Address bits 6
+ * 01b - Address bits 1
+ * 10b - Hash*, XOR of address bits [20:16, 6]
+ * 11b - Hash*, XOR of address bits [20:16, 9]
+ */
+
+ }
+
+ return val;
+}
+
+
+void mctHookAfterDIMMpre(void)
+{
+}
+
+
+void mctGet_MaxLoadFreq(struct DCTStatStruc *pDCTstat)
+{
+ pDCTstat->PresetmaxFreq = 400;
+}
+
+
+void mctAdjustAutoCycTmg(void)
+{
+}
+
+void mctAdjustAutoCycTmg_D(void)
+{
+}
+
+
+void mctHookAfterAutoCycTmg(void)
+{
+}
+
+
+void mctGetCS_ExcludeMap(void)
+{
+}
+
+
+void mctHookAfterAutoCfg(void)
+{
+}
+
+
+void mctHookAfterPSCfg(void)
+{
+}
+
+
+void mctHookAfterHTMap(void)
+{
+}
+
+
+void mctHookAfterCPU(void)
+{
+}
+
+
+void mctSaveDQSSigTmg_D(void)
+{
+}
+
+
+void mctGetDQSSigTmg_D(void)
+{
+}
+
+
+void mctHookBeforeECC(void)
+{
+}
+
+
+void mctHookAfterECC(void)
+{
+}
+
+
+void mctInitMemGPIOs_A(void)
+{
+}
+
+
+void mctInitMemGPIOs_A_D(void)
+{
+}
+
+
+void mctNodeIDDebugPort_D(void)
+{
+}
+
+
+void mctWarmReset(void)
+{
+}
+
+void mctWarmReset_D(void)
+{
+}
+
+
+void mctHookBeforeDramInit(void)
+{
+}
+
+
+void mctHookAfterDramInit(void)
+{
+}
+
+
+void mctHookBeforeAnyTraining(void)
+{
+}
+
+void mctHookAfterAnyTraining(void)
+{
+}
+
+u32 mctGetLogicalCPUID_D(u8 node)
+{
+ return mctGetLogicalCPUID(node);
+}