Diffstat (limited to 'src/northbridge/amd/amdmct/mct_ddr3')
-rw-r--r--  src/northbridge/amd/amdmct/mct_ddr3/mct_d.c     38
-rw-r--r--  src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c  38
2 files changed, 46 insertions, 30 deletions
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
index 4bfb08aafc..f696daeed9 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
@@ -47,8 +47,6 @@ static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat);
static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat);
-static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
- struct DCTStatStruc *pDCTstatA);
static u8 NodePresent_D(u8 Node);
static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA);
@@ -1507,10 +1505,11 @@ restartinit:
InterleaveChannels_D(pMCTstat, pDCTstatA);
printk(BIOS_DEBUG, "mctAutoInitMCT_D: ECCInit_D\n");
- if (ECCInit_D(pMCTstat, pDCTstatA)) { /* Setup ECC control and ECC check-bits*/
- printk(BIOS_DEBUG, "mctAutoInitMCT_D: MCTMemClr_D\n");
- MCTMemClr_D(pMCTstat,pDCTstatA);
- }
+ ECCInit_D(pMCTstat, pDCTstatA); /* Setup ECC control and ECC check-bits*/
+
+ /* mctDoWarmResetMemClr_D(); */
+ printk(BIOS_DEBUG, "mctAutoInitMCT_D: MCTMemClr_D\n");
+ MCTMemClr_D(pMCTstat,pDCTstatA);
printk(BIOS_DEBUG, "mctAutoInitMCT_D: mct_ForceNBPState0_Dis_Fam15\n");
for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -2102,9 +2101,6 @@ static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
/* FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA); */
mctHookAfterAnyTraining();
-
- /* mctDoWarmResetMemClr_D(); */
- MCTMemClr_D(pMCTstat, pDCTstatA);
}
static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
@@ -2392,26 +2388,6 @@ static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
}
}
-static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
- struct DCTStatStruc *pDCTstatA)
-{
- /* Ensures that memory clear has completed on all node.*/
- u8 Node;
- struct DCTStatStruc *pDCTstat;
-
- if (!mctGet_NVbits(NV_DQSTrainCTL)){
- /* callback to wrapper: mctDoWarmResetMemClr_D */
- } else { /* NV_DQSTrainCTL == 1 */
- for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
- pDCTstat = pDCTstatA + Node;
-
- if (pDCTstat->NodePresent) {
- DCTMemClr_Sync_D(pMCTstat, pDCTstat);
- }
- }
- }
-}
-
static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
{
@@ -3097,6 +3073,8 @@ static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
u16 proposedFreq;
u16 word;
+ printk(BIOS_DEBUG, "%s: Start\n", __func__);
+
/* Get CPU Si Revision defined limit (NPT) */
if (is_fam15h())
proposedFreq = 933;
@@ -3121,6 +3099,8 @@ static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
pDCTstat->PresetmaxFreq = word;
}
/* Check F3xE8[DdrMaxRate] for maximum DRAM data rate support */
+
+ printk(BIOS_DEBUG, "%s: Done\n", __func__);
}
static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
index 6ab33a3b1e..8701baebf7 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
@@ -84,6 +84,10 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
u32 val;
u16 nvbits;
+ uint32_t dword;
+ uint8_t sync_flood_on_dram_err[MAX_NODES_SUPPORTED];
+ uint8_t sync_flood_on_any_uc_err[MAX_NODES_SUPPORTED];
+
mctHookBeforeECC();
/* Construct these booleans, based on setup options, for easy handling
@@ -107,6 +111,25 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
nvbits = mctGet_NVbits(NV_DramBKScrub);
OF_ScrubCTL |= nvbits;
+ /* Prevent lockups on DRAM errors during ECC init */
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if (NodePresent_D(Node)) {
+ dword = Get_NB32(pDCTstat->dev_nbmisc, 0x44);
+ sync_flood_on_dram_err[Node] = (dword >> 30) & 0x1;
+ sync_flood_on_any_uc_err[Node] = (dword >> 21) & 0x1;
+ dword &= ~(0x1 << 30);
+ dword &= ~(0x1 << 21);
+ Set_NB32(pDCTstat->dev_nbmisc, 0x44, dword);
+
+ /* Clear the RAM before enabling ECC to prevent MCE-related lockups */
+ DCTMemClr_Init_D(pMCTstat, pDCTstat);
+ DCTMemClr_Sync_D(pMCTstat, pDCTstat);
+ }
+ }
+
AllECC = 1;
MemClrECC = 0;
for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -153,7 +176,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
} /* Node has Dram */
if (MemClrECC) {
- MCTMemClrSync_D(pMCTstat, pDCTstatA);
+ DCTMemClr_Sync_D(pMCTstat, pDCTstat);
}
if (pDCTstat->LogicalCPUID & (AMD_DR_GT_D0 | AMD_FAM15_ALL)) {
@@ -166,6 +189,19 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
} /* if Node present */
}
+ /* Restore previous MCA error handling settings */
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
+ struct DCTStatStruc *pDCTstat;
+ pDCTstat = pDCTstatA + Node;
+
+ if (NodePresent_D(Node)) {
+ dword = Get_NB32(pDCTstat->dev_nbmisc, 0x44);
+ dword |= (sync_flood_on_dram_err[Node] & 0x1) << 30;
+ dword |= (sync_flood_on_any_uc_err[Node] & 0x1) << 21;
+ Set_NB32(pDCTstat->dev_nbmisc, 0x44, dword);
+ }
+ }
+
if(AllECC)
pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
else