author		Elyes HAOUAS <ehaouas@noos.fr>	2016-09-19 10:25:41 -0600
committer	Patrick Georgi <pgeorgi@google.com>	2016-09-21 16:49:15 +0200
commit		e1606731b63bedd12398acb57a115aa5d280811e (patch)
tree		8da66e35adfc3142ae1eb822899abf039c975432 /src/northbridge/amd/amdmct/mct_ddr3
parent		8aa20193a6dc12ba6cf740b1ad41023475d69698 (diff)
northbridge/amd/amdmct: Improve code formatting
Change-Id: If87718b6c91d79212a9b045f5fda32d69ac4caee
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
Reviewed-on: https://review.coreboot.org/16643
Tested-by: build bot (Jenkins)
Reviewed-by: Patrick Georgi <pgeorgi@google.com>
Diffstat (limited to 'src/northbridge/amd/amdmct/mct_ddr3')
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mct_d.c	128
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mct_d.h	220
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h	12
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c	2
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c	6
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c	2
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c	44
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c	28
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c	28
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c	10
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctrci.c	4
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c	6
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c	56
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c	2
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c	6
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c	20
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mctwl.c	10
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c	82
-rw-r--r--	src/northbridge/amd/amdmct/mct_ddr3/s3utils.c	110
19 files changed, 388 insertions, 388 deletions
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
index 08d8d43ff3..da2a4fe0c6 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c
@@ -2625,7 +2625,7 @@ static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
* 1. BSP in Big Real Mode
* 2. Stack at SS:SP, located somewhere between A000:0000 and F000:FFFF
* 3. Checksummed or Valid NVRAM bits
- * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
+ * 4. MCG_CTL = -1, MC4_CTL_EN = 0 for all CPUs
* 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to entry
* 6. All var MTRRs reset to zero
* 7. State of NB_CFG.DisDatMsk set properly on all CPUs
@@ -3819,7 +3819,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
}
}
}
- for (Channel = 0; Channel<2; Channel++) {
+ for (Channel = 0; Channel < 2; Channel++) {
SetEccDQSRcvrEn_D(pDCTstat, Channel);
}
@@ -3859,7 +3859,7 @@ static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
}
}
- for (Channel = 0; Channel<2; Channel++) {
+ for (Channel = 0; Channel < 2; Channel++) {
reg = 0x78;
val = Get_NB32_DCT(dev, Channel, reg);
val &= ~(0x3ff<<22);
@@ -3993,7 +3993,7 @@ static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
val = Get_NB32(dev, reg);
Set_NB32(devx, reg, val);
reg += 4;
- } while ( reg < 0x80);
+ } while (reg < 0x80);
} else {
break; /* stop at first absent Node */
}
@@ -4015,7 +4015,7 @@ static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
uint32_t dword;
struct DCTStatStruc *pDCTstat;
- if (!mctGet_NVbits(NV_DQSTrainCTL)){
+ if (!mctGet_NVbits(NV_DQSTrainCTL)) {
/* FIXME: callback to wrapper: mctDoWarmResetMemClr_D */
} else { /* NV_DQSTrainCTL == 1 */
for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
@@ -4080,7 +4080,7 @@ static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
printk(BIOS_DEBUG, "%s: Start\n", __func__);
/* Ensure that a memory clear operation has completed on one node */
- if (pDCTstat->DCTSysLimit){
+ if (pDCTstat->DCTSysLimit) {
printk(BIOS_DEBUG, "%s: Waiting for memory clear to complete", __func__);
do {
dword = Get_NB32(dev, 0x110);
@@ -4223,7 +4223,7 @@ static void DCTFinalInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *p
dword &= ~(1 << ParEn);
Set_NB32_DCT(pDCTstat->dev_dct, dct, 0x90, dword);
- /* To maximize power savings when DisDramInterface=1b,
+ /* To maximize power savings when DisDramInterface = 1b,
* all of the MemClkDis bits should also be set.
*/
Set_NB32_DCT(pDCTstat->dev_dct, dct, 0x88, 0xff000000);
@@ -4369,16 +4369,16 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
Trc = 0;
Twr = 0;
Twtr = 0;
- for (i=0; i < 2; i++)
+ for (i = 0; i < 2; i++)
Etr[i] = 0;
- for (i=0; i < 4; i++)
+ for (i = 0; i < 4; i++)
Trfc[i] = 0;
Tfaw = 0;
- for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
+ for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
LDIMM = i >> 1;
if (pDCTstat->DIMMValid & (1 << i)) {
- val = pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDivisor]; /* MTB=Dividend/Divisor */
+ val = pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDivisor]; /* MTB = Dividend/Divisor */
MTB16x = ((pDCTstat->spd_data.spd_bytes[dct + i][SPD_MTBDividend] & 0xff) << 4);
MTB16x /= val; /* transfer to MTB*16 */
@@ -4574,7 +4574,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
pDCTstat->Twtr = val;
/* Trfc0-Trfc3 */
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
pDCTstat->Trfc[i] = Trfc[i];
/* Tfaw */
@@ -4647,7 +4647,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
Set_NB32_DCT(dev, dct, 0x204, dword); /* DRAM Timing 1 */
/* Trfc0-Trfc3 */
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
if (pDCTstat->Trfc[i] == 0x0)
pDCTstat->Trfc[i] = 0x1;
dword = Get_NB32_DCT(dev, dct, 0x208); /* DRAM Timing 2 */
@@ -4714,7 +4714,7 @@ static void SPD2ndTiming(struct MCTStatStruc *pMCTstat,
DramTimingHi |= val<<16;
val = 0;
- for (i=4;i>0;i--) {
+ for (i = 4; i > 0; i--) {
val <<= 3;
val |= Trfc[i-1];
}
@@ -4850,7 +4850,7 @@ static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
proposedFreq = 800; /* Rev F0 programmable max memclock is */
/*Get User defined limit if "limit" mode */
- if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
+ if (mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
if (word < proposedFreq)
proposedFreq = word;
@@ -4984,7 +4984,7 @@ static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
determine the desired CAS Latency. If tCKproposed is not a standard JEDEC
value (2.5, 1.875, 1.5, or 1.25 ns) then tCKproposed must be adjusted to the
next lower standard tCK value for calculating CLdesired.
- CLdesired = ceiling ( tAAmin(all) / tCKproposed )
+ CLdesired = ceiling (tAAmin(all) / tCKproposed)
where tAAmin is defined in Byte 16. The ceiling function requires that the
quotient be rounded up always. */
CLdesired = tAAmin16x / tCKproposed16x;
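
The CLdesired rule quoted in the comment above operates on MTB-scaled (x16) integers, so the rounding has to be done explicitly after the integer division. A minimal sketch of that ceiling, assuming tAAmin16x and tCKproposed16x are the x16 fixed-point values used by SPDGetTCL_D (the helper name below is illustrative, not coreboot API):

#include <stdint.h>

/* CLdesired = ceiling(tAAmin / tCKproposed): round the integer quotient up
 * whenever a remainder is left over. */
static uint16_t cl_desired(uint16_t tAAmin16x, uint16_t tCKproposed16x)
{
	uint16_t cl = tAAmin16x / tCKproposed16x;

	if (tAAmin16x % tCKproposed16x)
		cl++;
	return cl;
}

For example, tAAmin = 13.125 ns at tCKproposed = 1.5 ns gives ceiling(8.75) = CL 9.
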
@@ -5163,7 +5163,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
if (mctGet_NVbits(NV_ECC_CAP))
if (Status & (1 << SB_ECCDIMMs))
- if ( mctGet_NVbits(NV_ECC))
+ if (mctGet_NVbits(NV_ECC))
DramConfigLo |= 1 << DimmEcEn;
DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
@@ -5210,7 +5210,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
}
- if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
+ if (0) /* call back not needed mctOverrideDcqBypMax_D) */
val = mctGet_NVbits(NV_BYPMAX);
else
val = 0x0f; /* recommended setting (default) */
@@ -5224,7 +5224,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1. We will assume that MemClkDis field has been preset prior to this
point.
2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
- NV_AllMemClks <>0 AND SB_DiagClks ==0 */
+ NV_AllMemClks <>0 AND SB_DiagClks == 0 */
/* Dram Timing Low (owns Clock Enable bits) */
DramTimingLo = Get_NB32_DCT(dev, dct, 0x88);
@@ -5253,7 +5253,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
dword = 0;
byte = 0xFF;
while (dword < MAX_CS_SUPPORTED) {
- if (pDCTstat->CSPresent & (1<<dword)){
+ if (pDCTstat->CSPresent & (1<<dword)) {
/* re-enable clocks for the enabled CS */
val = p[dword];
byte &= ~val;
@@ -5330,11 +5330,11 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
if (pDCTstat->DIMMValid & (1<<byte)) {
byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Addressing];
- Rows = (byte >> 3) & 0x7; /* Rows:0b=12-bit,... */
- Cols = byte & 0x7; /* Cols:0b=9-bit,... */
+ Rows = (byte >> 3) & 0x7; /* Rows:0b = 12-bit,... */
+ Cols = byte & 0x7; /* Cols:0b = 9-bit,... */
byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Density];
- Banks = (byte >> 4) & 7; /* Banks:0b=3-bit,... */
+ Banks = (byte >> 4) & 7; /* Banks:0b = 3-bit,... */
byte = pDCTstat->spd_data.spd_bytes[ChipSel + dct][SPD_Organization];
Ranks = ((byte >> 3) & 7) + 1;
@@ -5351,7 +5351,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
byte |= Rows << 3; /* RRRBCC internal encode */
- for (dword=0; dword < 13; dword++) {
+ for (dword = 0; dword < 13; dword++) {
if (byte == Tab_BankAddr[dword])
break;
}
@@ -5367,7 +5367,7 @@ static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
or 2pow(rows+cols+banks-5)-1*/
csMask = 0;
- byte = Rows + Cols; /* cl=rows+cols*/
+ byte = Rows + Cols; /* cl = rows+cols*/
byte += 21; /* row:12+col:9 */
byte -= 2; /* 3 banks - 5 */
@@ -5435,7 +5435,7 @@ static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
/* Check Symmetry of Channel A and Channel B DIMMs
(must be matched for 128-bit mode).*/
- for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
+ for (i = 0; i < MAX_DIMMS_SUPPORTED; i += 2) {
if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
byte = pDCTstat->spd_data.spd_bytes[i][SPD_Addressing] & 0x7;
byte1 = pDCTstat->spd_data.spd_bytes[i + 1][SPD_Addressing] & 0x7;
@@ -5498,7 +5498,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
_DSpareEn = 0;
- /* CS Sparing 1=enabled, 0=disabled */
+ /* CS Sparing 1 = enabled, 0 = disabled */
if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
if (MCT_DIMM_SPARE_NO_WARM) {
/* Do no warm-reset DIMM spare */
@@ -5513,7 +5513,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
pDCTstat->ErrStatus |= 1 << SB_SpareDis;
}
} else {
- if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
+ if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1 = enabled, 0 = disabled */
word = pDCTstat->CSPresent;
val = bsf(word);
word &= ~(1 << val);
@@ -5527,13 +5527,13 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
}
nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
- for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
+ for (p = 0; p < MAX_DIMMS_SUPPORTED; p++) {
BiggestBank = 0;
for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
reg = 0x40 + (q << 2); /* Base[q] reg.*/
val = Get_NB32_DCT(dev, dct, reg);
- if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
+ if (!(val & 3)) { /* (CSEnable|Spare == 1)bank is enabled already? */
reg = 0x60 + (q << 1); /*Mask[q] reg.*/
val = Get_NB32_DCT(dev, dct, reg);
val >>= 19;
@@ -5549,7 +5549,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
} /*if bank present */
} /* while q */
if (BiggestBank !=0) {
- curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
+ curcsBase = nxtcsBase; /* curcsBase = nxtcsBase*/
/* DRAM CS Base b Address Register offset */
reg = 0x40 + (b << 2);
if (_DSpareEn) {
@@ -5579,7 +5579,7 @@ static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
}
/* bank present but disabled?*/
- if ( pDCTstat->CSTestFail & (1 << p)) {
+ if (pDCTstat->CSTestFail & (1 << p)) {
/* DRAM CS Base b Address Register offset */
reg = (p << 2) + 0x40;
val = 1 << TestFail;
@@ -5611,12 +5611,12 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
/* Check DIMMs present, verify checksum, flag SDRAM type,
* build population indicator bitmaps, and preload bus loading
* of DIMMs into DCTStatStruc.
- * MAAload=number of devices on the "A" bus.
- * MABload=number of devices on the "B" bus.
- * MAAdimms=number of DIMMs on the "A" bus slots.
- * MABdimms=number of DIMMs on the "B" bus slots.
- * DATAAload=number of ranks on the "A" bus slots.
- * DATABload=number of ranks on the "B" bus slots.
+ * MAAload = number of devices on the "A" bus.
+ * MABload = number of devices on the "B" bus.
+ * MAAdimms = number of DIMMs on the "A" bus slots.
+ * MABdimms = number of DIMMs on the "B" bus slots.
+ * DATAAload = number of ranks on the "A" bus slots.
+ * DATABload = number of ranks on the "B" bus slots.
*/
u16 i, j, k;
u8 smbaddr;
@@ -5747,7 +5747,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
byte &= 7;
if (byte == 3) { /* 4ranks */
/* if any DIMMs are QR, we have to make two passes through DIMMs*/
- if ( pDCTstat->DimmQRPresent == 0) {
+ if (pDCTstat->DimmQRPresent == 0) {
MaxDimms <<= 1;
}
if (i < DimmSlots) {
@@ -5767,7 +5767,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
else if (devwidth == 2)
bytex = 4;
- byte++; /* al+1=rank# */
+ byte++; /* al+1 = rank# */
if (byte == 2)
bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
@@ -5847,7 +5847,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
}
}
if (pDCTstat->DimmECCPresent != 0) {
- if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
+ if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid) == 0) {
/* all DIMMs are ECC capable */
pDCTstat->Status |= 1<<SB_ECCDIMMs;
}
@@ -5961,7 +5961,7 @@ static void mct_initDCT(struct MCTStatStruc *pMCTstat,
val &= ~(1 << ParEn);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, val);
- /* To maximize power savings when DisDramInterface=1b,
+ /* To maximize power savings when DisDramInterface = 1b,
* all of the MemClkDis bits should also be set.
*/
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x88, 0xff000000);
@@ -6119,7 +6119,7 @@ static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
i_start = dct;
i_end = dct + 1;
}
- for (i=i_start; i<i_end; i++) {
+ for (i = i_start; i < i_end; i++) {
index_reg = 0x98;
Set_NB32_index_wait_DCT(dev, i, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A/B Output Driver Compensation Control */
Set_NB32_index_wait_DCT(dev, i, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A/B Output Driver Compensation Control */
@@ -6152,7 +6152,7 @@ static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct)
{
if (!pDCTstat->GangedMode) {
- if (dct == 0 ) {
+ if (dct == 0) {
pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
if (pDCTstat->DIMMValidDCT[dct] == 0)
pDCTstat->ErrCode = SC_StopError;
@@ -6172,7 +6172,7 @@ static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
u8 ret;
u32 val;
- if ( dct == 0) {
+ if (dct == 0) {
SPDCalcWidth_D(pMCTstat, pDCTstat);
ret = mct_setMode(pMCTstat, pDCTstat);
} else {
@@ -6313,7 +6313,7 @@ static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
Set_OtherTiming(pMCTstat, pDCTstat, 0);
}
- if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
+ if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode) {
pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
Set_OtherTiming(pMCTstat, pDCTstat, 1);
}
@@ -6568,8 +6568,8 @@ static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
if (index == 0x12)
ecc_reg = 1;
- for (i=0; i < 8; i+=2) {
- if ( pDCTstat->DIMMValid & (1 << i)) {
+ for (i = 0; i < 8; i+=2) {
+ if (pDCTstat->DIMMValid & (1 << i)) {
val = Get_NB32_index_wait_DCT(dev, dct, index_reg, index);
val &= 0x00E000E0;
byte = (val >> 5) & 0xFF;
@@ -6607,11 +6607,11 @@ static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
Smallest = 3;
Largest = 0;
- for (i=0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
val = Get_NB32_index_wait_DCT(dev, dct, index_reg, index);
val &= 0x60606060;
val >>= 5;
- for (j=0; j < 4; j++) {
+ for (j = 0; j < 4; j++) {
byte = val & 0xFF;
if (byte < Smallest)
Smallest = byte;
@@ -6774,7 +6774,7 @@ static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
/* ClrClToNB_D postponed until we're done executing from ROM */
mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
- /* set F3x8C[DisFastTprWr] on all DR, if L3Size=0 */
+ /* set F3x8C[DisFastTprWr] on all DR, if L3Size = 0 */
if (pDCTstat->LogicalCPUID & AMD_DR_ALL) {
if (!(cpuid_edx(0x80000006) & 0xFFFC0000)) {
val = Get_NB32(pDCTstat->dev_nbmisc, 0x8C);
@@ -6948,7 +6948,7 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
/* Copy dram map from F1x40/44,F1x48/4c,
to F1x120/124(Node0),F1x120/124(Node1),...*/
- for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
+ for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
pDCTstat = pDCTstatA + Node;
devx = pDCTstat->dev_map;
@@ -6975,7 +6975,7 @@ static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
val |= Dramlimit;
Set_NB32(devx, reg, val);
- if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
+ if (pMCTstat->GStatus & (1 << GSB_HWHole)) {
reg = 0xF0;
val = Get_NB32(devx, reg);
val |= (1 << DramMemHoistValid);
@@ -7328,7 +7328,7 @@ static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
} else {
dword = Get_NB32_index_wait_DCT(dev, dct, index_reg, 0x00);
dword = 0;
- for (i=0; i < 6; i++) {
+ for (i = 0; i < 6; i++) {
switch (i) {
case 0:
case 4:
@@ -7668,7 +7668,7 @@ static void mct_ProgramODT_D(struct MCTStatStruc *pMCTstat,
dword = 0x00000800;
else
dword = 0x00000000;
- for (i=0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
Set_NB32_DCT(dev, i, 0x98, 0x0D000030);
Set_NB32_DCT(dev, i, 0x9C, dword);
Set_NB32_DCT(dev, i, 0x98, 0x4D040F30);
@@ -7958,11 +7958,11 @@ void ProgDramMRSReg_D(struct MCTStatStruc *pMCTstat,
DramMRS |= mct_DramTermDyn_RDimm(pMCTstat, pDCTstat, byte);
}
- /* Qoff=0, output buffers enabled */
+ /* Qoff = 0, output buffers enabled */
/* Tcwl */
DramMRS |= (pDCTstat->Speed - 4) << 20;
- /* ASR=1, auto self refresh */
- /* SRT=0 */
+ /* ASR = 1, auto self refresh */
+ /* SRT = 0 */
DramMRS |= 1 << 18;
}
@@ -7989,10 +7989,10 @@ void mct_SetDramConfigHi_D(struct MCTStatStruc *pMCTstat,
* Solution: From the bug report:
* 1. A software-initiated frequency change should be wrapped into the
* following sequence :
- * - a) Disable Compensation (F2[1, 0]9C_x08[30] )
+ * - a) Disable Compensation (F2[1, 0]9C_x08[30])
* b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
* c) Do frequency change
- * d) Enable Compensation (F2[1, 0]9C_x08[30] )
+ * d) Enable Compensation (F2[1, 0]9C_x08[30])
* 2. A software-initiated Disable Compensation should always be
* followed by step b) of the above steps.
* Silicon Status: Fixed In Rev B0
@@ -8275,9 +8275,9 @@ static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
/* ==========================================================
* 6-bit Bank Addressing Table
- * RR=rows-13 binary
- * B=Banks-2 binary
- * CCC=Columns-9 binary
+ * RR = rows-13 binary
+ * B = Banks-2 binary
+ * CCC = Columns-9 binary
* ==========================================================
* DCT CCCBRR Rows Banks Columns 64-bit CS Size
* Encoding
@@ -8311,7 +8311,7 @@ uint8_t crcCheck(struct DCTStatStruc *pDCTstat, uint8_t dimm)
for (Index = 0; Index < byte_use; Index ++) {
byte = pDCTstat->spd_data.spd_bytes[dimm][Index];
CRC ^= byte << 8;
- for (i=0; i<8; i++) {
+ for (i = 0; i < 8; i++) {
if (CRC & 0x8000) {
CRC <<= 1;
CRC ^= 0x1021;
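
The loop above is the heart of the SPD CRC check: a CRC-16 with polynomial 0x1021 and initial value 0, run MSB-first over the first byte_use bytes of the SPD and compared against the stored checksum. A self-contained sketch of the same computation (the function name and buffer handling are illustrative, not the coreboot API):

#include <stdint.h>
#include <stddef.h>

/* CRC-16, polynomial 0x1021, initial value 0, processed MSB first. */
static uint16_t spd_crc16(const uint8_t *spd, size_t byte_use)
{
	uint16_t crc = 0;
	size_t n;
	int i;

	for (n = 0; n < byte_use; n++) {
		crc ^= (uint16_t)spd[n] << 8;
		for (i = 0; i < 8; i++) {
			if (crc & 0x8000)
				crc = (crc << 1) ^ 0x1021;
			else
				crc <<= 1;
		}
	}
	return crc;
}

Per the JEDEC DDR3 SPD layout, the expected value sits in SPD bytes 126/127, and bit 7 of byte 0 selects whether the CRC covers bytes 0-116 or 0-125, which is where byte_use comes from.
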
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
index e1d9da53fb..c42e452300 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.h
@@ -33,16 +33,16 @@
#define PT_C3 5
#define PT_FM2 6
-#define J_MIN 0 /* j loop constraint. 1=CL 2.0 T*/
-#define J_MAX 5 /* j loop constraint. 5=CL 7.0 T*/
-#define K_MIN 1 /* k loop constraint. 1=200 MHz*/
-#define K_MAX 5 /* k loop constraint. 5=533 MHz*/
-#define CL_DEF 2 /* Default value for failsafe operation. 2=CL 4.0 T*/
-#define T_DEF 1 /* Default value for failsafe operation. 1=5ns (cycle time)*/
-
-#define BSCRate 1 /* reg bit field=rate of dram scrubber for ecc*/
+#define J_MIN 0 /* j loop constraint. 1 = CL 2.0 T*/
+#define J_MAX 5 /* j loop constraint. 5 = CL 7.0 T*/
+#define K_MIN 1 /* k loop constraint. 1 = 200 MHz*/
+#define K_MAX 5 /* k loop constraint. 5 = 533 MHz*/
+#define CL_DEF 2 /* Default value for failsafe operation. 2 = CL 4.0 T*/
+#define T_DEF 1 /* Default value for failsafe operation. 1 = 5ns (cycle time)*/
+
+#define BSCRate 1 /* reg bit field = rate of dram scrubber for ecc*/
/* memory initialization (ecc and check-bits).*/
- /* 1=40 ns/64 bytes.*/
+ /* 1 = 40 ns/64 bytes.*/
#define FirstPass 1 /* First pass through RcvEn training*/
#define SecondPass 2 /* Second pass through Rcven training*/
@@ -336,7 +336,7 @@ struct DCTStatStruc { /* A per Node structure*/
/* DCTStatStruct_F - start */
u8 Node_ID; /* Node ID of current controller */
uint8_t Internal_Node_ID; /* Internal Node ID of the current controller */
- uint8_t Dual_Node_Package; /* 1=Dual node package (G34) */
+ uint8_t Dual_Node_Package; /* 1 = Dual node package (G34) */
uint8_t stopDCT[2]; /* Set if the DCT will be stopped */
u8 ErrCode; /* Current error condition of Node
0= no error
@@ -353,7 +353,7 @@ struct DCTStatStruc { /* A per Node structure*/
/* SPD address of..MB2_CS_L[0,1]*/
/* SPD address of..MA3_CS_L[0,1]*/
/* SPD address of..MB3_CS_L[0,1]*/
- u16 DIMMPresent; /*For each bit n 0..7, 1=DIMM n is present.
+ u16 DIMMPresent; /*For each bit n 0..7, 1 = DIMM n is present.
DIMM# Select Signal
0 MA0_CS_L[0,1]
1 MB0_CS_L[0,1]
@@ -363,15 +363,15 @@ struct DCTStatStruc { /* A per Node structure*/
5 MB2_CS_L[0,1]
6 MA3_CS_L[0,1]
7 MB3_CS_L[0,1]*/
- u16 DIMMValid; /* For each bit n 0..7, 1=DIMM n is valid and is/will be configured*/
- u16 DIMMMismatch; /* For each bit n 0..7, 1=DIMM n is mismatched, channel B is always considered the mismatch */
- u16 DIMMSPDCSE; /* For each bit n 0..7, 1=DIMM n SPD checksum error*/
- u16 DimmECCPresent; /* For each bit n 0..7, 1=DIMM n is ECC capable.*/
- u16 DimmPARPresent; /* For each bit n 0..7, 1=DIMM n is ADR/CMD Parity capable.*/
- u16 Dimmx4Present; /* For each bit n 0..7, 1=DIMM n contains x4 data devices.*/
- u16 Dimmx8Present; /* For each bit n 0..7, 1=DIMM n contains x8 data devices.*/
- u16 Dimmx16Present; /* For each bit n 0..7, 1=DIMM n contains x16 data devices.*/
- u16 DIMM2Kpage; /* For each bit n 0..7, 1=DIMM n contains 1K page devices.*/
+ u16 DIMMValid; /* For each bit n 0..7, 1 = DIMM n is valid and is/will be configured*/
+ u16 DIMMMismatch; /* For each bit n 0..7, 1 = DIMM n is mismatched, channel B is always considered the mismatch */
+ u16 DIMMSPDCSE; /* For each bit n 0..7, 1 = DIMM n SPD checksum error*/
+ u16 DimmECCPresent; /* For each bit n 0..7, 1 = DIMM n is ECC capable.*/
+ u16 DimmPARPresent; /* For each bit n 0..7, 1 = DIMM n is ADR/CMD Parity capable.*/
+ u16 Dimmx4Present; /* For each bit n 0..7, 1 = DIMM n contains x4 data devices.*/
+ u16 Dimmx8Present; /* For each bit n 0..7, 1 = DIMM n contains x8 data devices.*/
+ u16 Dimmx16Present; /* For each bit n 0..7, 1 = DIMM n contains x16 data devices.*/
+ u16 DIMM2Kpage; /* For each bit n 0..7, 1 = DIMM n contains 1K page devices.*/
u8 MAload[2]; /* Number of devices loading MAA bus*/
/* Number of devices loading MAB bus*/
u8 MAdimms[2]; /*Number of DIMMs loading CH A*/
@@ -379,17 +379,17 @@ struct DCTStatStruc { /* A per Node structure*/
u8 DATAload[2]; /*Number of ranks loading CH A DATA*/
/* Number of ranks loading CH B DATA*/
u8 DIMMAutoSpeed; /*Max valid Mfg. Speed of DIMMs
- 1=200MHz
- 2=266MHz
- 3=333MHz
- 4=400MHz
- 5=533MHz*/
+ 1 = 200MHz
+ 2 = 266MHz
+ 3 = 333MHz
+ 4 = 400MHz
+ 5 = 533MHz*/
u8 DIMMCASL; /* Min valid Mfg. CL bitfield
- 0=2.0
- 1=3.0
- 2=4.0
- 3=5.0
- 4=6.0 */
+ 0 = 2.0
+ 1 = 3.0
+ 2 = 4.0
+ 3 = 5.0
+ 4 = 6.0 */
u16 DIMMTrcd; /* Minimax Trcd*40 (ns) of DIMMs*/
u16 DIMMTrp; /* Minimax Trp*40 (ns) of DIMMs*/
u16 DIMMTrtp; /* Minimax Trtp*40 (ns) of DIMMs*/
@@ -399,16 +399,16 @@ struct DCTStatStruc { /* A per Node structure*/
u16 DIMMTrrd; /* Minimax Trrd*40 (ns) of DIMMs*/
u16 DIMMTwtr; /* Minimax Twtr*40 (ns) of DIMMs*/
u8 Speed; /* Bus Speed (to set Controller)
- 1=200MHz
- 2=266MHz
- 3=333MHz
- 4=400MHz */
+ 1 = 200MHz
+ 2 = 266MHz
+ 3 = 333MHz
+ 4 = 400MHz */
u8 CASL; /* CAS latency DCT setting
- 0=2.0
- 1=3.0
- 2=4.0
- 3=5.0
- 4=6.0 */
+ 0 = 2.0
+ 1 = 3.0
+ 2 = 4.0
+ 3 = 5.0
+ 4 = 6.0 */
u8 Trcd; /* DCT Trcd (busclocks) */
u8 Trp; /* DCT Trp (busclocks) */
u8 Trtp; /* DCT Trtp (busclocks) */
@@ -418,27 +418,27 @@ struct DCTStatStruc { /* A per Node structure*/
u8 Trrd; /* DCT Trrd (busclocks) */
u8 Twtr; /* DCT Twtr (busclocks) */
u8 Trfc[4]; /* DCT Logical DIMM0 Trfc
- 0=75ns (for 256Mb devs)
- 1=105ns (for 512Mb devs)
- 2=127.5ns (for 1Gb devs)
- 3=195ns (for 2Gb devs)
- 4=327.5ns (for 4Gb devs) */
+ 0 = 75ns (for 256Mb devs)
+ 1 = 105ns (for 512Mb devs)
+ 2 = 127.5ns (for 1Gb devs)
+ 3 = 195ns (for 2Gb devs)
+ 4 = 327.5ns (for 4Gb devs) */
/* DCT Logical DIMM1 Trfc (see Trfc0 for format) */
/* DCT Logical DIMM2 Trfc (see Trfc0 for format) */
/* DCT Logical DIMM3 Trfc (see Trfc0 for format) */
- u16 CSPresent; /* For each bit n 0..7, 1=Chip-select n is present */
- u16 CSTestFail; /* For each bit n 0..7, 1=Chip-select n is present but disabled */
+ u16 CSPresent; /* For each bit n 0..7, 1 = Chip-select n is present */
+ u16 CSTestFail; /* For each bit n 0..7, 1 = Chip-select n is present but disabled */
u32 DCTSysBase; /* BASE[39:8] (system address) of this Node's DCTs. */
u32 DCTHoleBase; /* If not zero, BASE[39:8] (system address) of dram hole for HW remapping. Dram hole exists on this Node's DCTs. */
u32 DCTSysLimit; /* LIMIT[39:8] (system address) of this Node's DCTs */
u16 PresetmaxFreq; /* Maximum OEM defined DDR frequency
- 200=200MHz (DDR400)
- 266=266MHz (DDR533)
- 333=333MHz (DDR667)
- 400=400MHz (DDR800) */
+ 200 = 200MHz (DDR400)
+ 266 = 266MHz (DDR533)
+ 333 = 333MHz (DDR667)
+ 400 = 400MHz (DDR800) */
u8 _2Tmode; /* 1T or 2T CMD mode (slow access mode)
- 1=1T
- 2=2T */
+ 1 = 1T
+ 2 = 2T */
u8 TrwtTO; /* DCT TrwtTO (busclocks)*/
u8 Twrrd; /* DCT Twrrd (busclocks)*/
u8 Twrwr; /* DCT Twrwr (busclocks)*/
@@ -462,9 +462,9 @@ struct DCTStatStruc { /* A per Node structure*/
/* CHB Byte 0-7 Read DQS Delay */
u32 PtrPatternBufA; /* Ptr on stack to aligned DQS testing pattern*/
u32 PtrPatternBufB; /* Ptr on stack to aligned DQS testing pattern*/
- u8 Channel; /* Current Channel (0= CH A, 1=CH B)*/
+ u8 Channel; /* Current Channel (0= CH A, 1 = CH B)*/
u8 ByteLane; /* Current Byte Lane (0..7)*/
- u8 Direction; /* Current DQS-DQ training write direction (0=read, 1=write)*/
+ u8 Direction; /* Current DQS-DQ training write direction (0 = read, 1 = write)*/
u8 Pattern; /* Current pattern*/
u8 DQSDelay; /* Current DQS delay value*/
u32 TrainErrors; /* Current Training Errors*/
@@ -545,15 +545,15 @@ struct DCTStatStruc { /* A per Node structure*/
u8 WrDatGrossH;
u8 DqsRcvEnGrossL;
/* NOTE: Not used - u8 NodeSpeed */ /* Bus Speed (to set Controller) */
- /* 1=200MHz */
- /* 2=266MHz */
- /* 3=333MHz */
+ /* 1 = 200MHz */
+ /* 2 = 266MHz */
+ /* 3 = 333MHz */
/* NOTE: Not used - u8 NodeCASL */ /* CAS latency DCT setting */
- /* 0=2.0 */
- /* 1=3.0 */
- /* 2=4.0 */
- /* 3=5.0 */
- /* 4=6.0 */
+ /* 0 = 2.0 */
+ /* 1 = 3.0 */
+ /* 2 = 4.0 */
+ /* 3 = 5.0 */
+ /* 4 = 6.0 */
u8 TrwtWB;
u8 CurrRcvrCHADelay; /* for keep current RcvrEnDly of chA*/
u16 T1000; /* get the T1000 figure (cycle time (ns)*1K)*/
@@ -852,7 +852,7 @@ struct amd_s3_persistent_data {
#define SB_SWNodeHole 8 /* Remapping of Node Base on this Node to create a gap.*/
#define SB_HWHole 9 /* Memory Hole created on this Node using HW remapping.*/
#define SB_Over400MHz 10 /* DCT freq >= 400MHz flag*/
-#define SB_DQSPos_Pass2 11 /* Using for TrainDQSPos DIMM0/1, when freq>=400MHz*/
+#define SB_DQSPos_Pass2 11 /* Using for TrainDQSPos DIMM0/1, when freq >= 400MHz*/
#define SB_DQSRcvLimit 12 /* Using for DQSRcvEnTrain to know we have reached to upper bound.*/
#define SB_ExtConfig 13 /* Indicator the default setting for extend PCI configuration support*/
@@ -862,73 +862,73 @@ struct amd_s3_persistent_data {
===============================================================================*/
/*Platform Configuration*/
#define NV_PACK_TYPE 0 /* CPU Package Type (2-bits)
- 0=NPT L1
- 1=NPT M2
- 2=NPT S1*/
+ 0 = NPT L1
+ 1 = NPT M2
+ 2 = NPT S1*/
#define NV_MAX_NODES 1 /* Number of Nodes/Sockets (4-bits)*/
#define NV_MAX_DIMMS 2 /* Number of DIMM slots for the specified Node ID (4-bits)*/
#define NV_MAX_MEMCLK 3 /* Maximum platform demonstrated Memclock (10-bits)
- 200=200MHz (DDR400)
- 266=266MHz (DDR533)
- 333=333MHz (DDR667)
- 400=400MHz (DDR800)*/
+ 200 = 200MHz (DDR400)
+ 266 = 266MHz (DDR533)
+ 333 = 333MHz (DDR667)
+ 400 = 400MHz (DDR800)*/
#define NV_MIN_MEMCLK 4 /* Minimum platform demonstrated Memclock (10-bits) */
#define NV_ECC_CAP 5 /* Bus ECC capable (1-bits)
- 0=Platform not capable
- 1=Platform is capable*/
+ 0 = Platform not capable
+ 1 = Platform is capable*/
#define NV_4RANKType 6 /* Quad Rank DIMM slot type (2-bits)
- 0=Normal
- 1=R4 (4-Rank Registered DIMMs in AMD server configuration)
- 2=S4 (Unbuffered SO-DIMMs)*/
+ 0 = Normal
+ 1 = R4 (4-Rank Registered DIMMs in AMD server configuration)
+ 2 = S4 (Unbuffered SO-DIMMs)*/
#define NV_BYPMAX 7 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
- 4=4 times bypass (normal for non-UMA systems)
- 7=7 times bypass (normal for UMA systems)*/
+ 4 = 4 times bypass (normal for non-UMA systems)
+ 7 = 7 times bypass (normal for UMA systems)*/
#define NV_RDWRQBYP 8 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
- 2=8 times (normal for non-UMA systems)
- 3=16 times (normal for UMA systems)*/
+ 2 = 8 times (normal for non-UMA systems)
+ 3 = 16 times (normal for UMA systems)*/
/*Dram Timing*/
#define NV_MCTUSRTMGMODE 10 /* User Memclock Mode (2-bits)
- 0=Auto, no user limit
- 1=Auto, user limit provided in NV_MemCkVal
- 2=Manual, user value provided in NV_MemCkVal*/
+ 0 = Auto, no user limit
+ 1 = Auto, user limit provided in NV_MemCkVal
+ 2 = Manual, user value provided in NV_MemCkVal*/
#define NV_MemCkVal 11 /* Memory Clock Value (2-bits)
- 0=200MHz
- 1=266MHz
- 2=333MHz
- 3=400MHz*/
+ 0 = 200MHz
+ 1 = 266MHz
+ 2 = 333MHz
+ 3 = 400MHz*/
/*Dram Configuration*/
#define NV_BankIntlv 20 /* Dram Bank (chip-select) Interleaving (1-bits)
- 0=disable
- 1=enable*/
+ 0 = disable
+ 1 = enable*/
#define NV_AllMemClks 21 /* Turn on All DIMM clocks (1-bits)
- 0=normal
- 1=enable all memclocks*/
+ 0 = normal
+ 1 = enable all memclocks*/
#define NV_SPDCHK_RESTRT 22 /* SPD Check control bitmap (1-bits)
- 0=Exit current node init if any DIMM has SPD checksum error
- 1=Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
+ 0 = Exit current node init if any DIMM has SPD checksum error
+ 1 = Ignore faulty SPD checksums (Note: DIMM cannot be enabled)*/
#define NV_DQSTrainCTL 23 /* DQS Signal Timing Training Control
- 0=skip DQS training
- 1=perform DQS training*/
+ 0 = skip DQS training
+ 1 = perform DQS training*/
#define NV_NodeIntlv 24 /* Node Memory Interleaving (1-bits)
- 0=disable
- 1=enable*/
+ 0 = disable
+ 1 = enable*/
#define NV_BurstLen32 25 /* BurstLength32 for 64-bit mode (1-bits)
- 0=disable (normal)
- 1=enable (4 beat burst when width is 64-bits)*/
+ 0 = disable (normal)
+ 1 = enable (4 beat burst when width is 64-bits)*/
/*Dram Power*/
#define NV_CKE_PDEN 30 /* CKE based power down mode (1-bits)
- 0=disable
- 1=enable*/
+ 0 = disable
+ 1 = enable*/
#define NV_CKE_CTL 31 /* CKE based power down control (1-bits)
- 0=per Channel control
- 1=per Chip select control*/
+ 0 = per Channel control
+ 1 = per Chip select control*/
#define NV_CLKHZAltVidC3 32 /* Memclock tri-stating during C3 and Alt VID (1-bits)
- 0=disable
- 1=enable*/
+ 0 = disable
+ 1 = enable*/
/*Memory Map/Mgt.*/
#define NV_BottomIO 40 /* Bottom of 32-bit IO space (8-bits)
@@ -936,8 +936,8 @@ struct amd_s3_persistent_data {
#define NV_BottomUMA 41 /* Bottom of shared graphics dram (8-bits)
NV_BottomUMA[7:0]=Addr[31:24]*/
#define NV_MemHole 42 /* Memory Hole Remapping (1-bits)
- 0=disable
- 1=enable */
+ 0 = disable
+ 1 = enable */
/*ECC*/
#define NV_ECC 50 /* Dram ECC enable*/
@@ -949,13 +949,13 @@ struct amd_s3_persistent_data {
#define NV_L3BKScrub 57 /* L3 ECC Background Scrubber CTL*/
#define NV_DCBKScrub 58 /* DCache ECC Background Scrubber CTL*/
#define NV_CS_SpareCTL 59 /* Chip Select Spare Control bit 0:
- 0=disable Spare
- 1=enable Spare */
+ 0 = disable Spare
+ 1 = enable Spare */
/* Chip Select Spare Control bit 1-4:
Reserved, must be zero*/
#define NV_SyncOnUnEccEn 61 /* SyncOnUnEccEn control
- 0=disable
- 1=enable*/
+ 0 = disable
+ 1 = enable*/
#define NV_Unganged 62
#define NV_ChannelIntlv 63 /* Channel Interleaving (3-bits)
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
index a7fac8f390..74fadde1f7 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h
@@ -37,7 +37,7 @@ static inline void _RDTSC(u32 *lo, u32 *hi)
__asm__ volatile (
"rdtsc"
: "=a" (*lo), "=d"(*hi)
- );
+ );
}
static inline void _cpu_id(u32 addr, u32 *val)
@@ -57,7 +57,7 @@ static u32 bsr(u32 x)
u8 i;
u32 ret = 0;
- for (i=31; i>0; i--) {
+ for (i = 31; i > 0; i--) {
if (x & (1<<i)) {
ret = i;
break;
@@ -73,7 +73,7 @@ static u32 bsf(u32 x)
u8 i;
u32 ret = 32;
- for (i=0; i<32; i++) {
+ for (i = 0; i < 32; i++) {
if (x & (1<<i)) {
ret = i;
break;
@@ -83,9 +83,9 @@ static u32 bsf(u32 x)
return ret;
}
-#define _MFENCE asm volatile ( "mfence")
+#define _MFENCE asm volatile ("mfence")
-#define _SFENCE asm volatile ( "sfence" )
+#define _SFENCE asm volatile ("sfence")
/* prevent speculative execution of following instructions */
#define _EXECFENCE asm volatile ("outb %al, $0xed")
@@ -301,7 +301,7 @@ static u32 stream_to_int(u8 *p)
val = 0;
- for (i=3; i>=0; i--) {
+ for (i = 3; i >= 0; i--) {
val <<= 8;
valx = *(p+i);
val |= valx;
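
As defined above, bsf() returns the position of the lowest set bit (32 when the argument is zero) and bsr() the highest (0 when the argument is zero); the memory code uses them to walk bitmaps such as CSPresent. A short usage sketch, assuming bsf() from this header and the u16/u32 types used throughout (the loop body is illustrative):

/* Visit the chip selects of a CSPresent-style bitmap, lowest first. */
static void for_each_chip_select(u16 cspresent)
{
	while (cspresent) {
		u32 cs = bsf(cspresent);	/* e.g. 0x05 yields cs = 0, then cs = 2 */
		cspresent &= ~(1 << cs);	/* clear it and continue with the next one */
		/* ... program the DRAM CS Base register for chip select cs ... */
	}
}
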
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
index 3f013088ab..d6480ab91a 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c
@@ -50,7 +50,7 @@ void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat,
* : ODC_CTL - Output Driver Compensation Control Register Value
* : CMDmode - CMD mode
*/
-static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload,
+static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
u8 DATAAload, u32 *AddrTmgCTL, u32 *ODC_CTL,
u8 *CMDmode)
{
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
index 6c25f2c067..30cf10e90c 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctchi_d.c
@@ -33,8 +33,8 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
/* call back to wrapper not needed ManualChannelInterleave_D(); */
/* call back - DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv);*/ /* override interleave */
- /* Manually set: typ=5, otherwise typ=7. */
- DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ=5: Hash*: exclusive OR of address bits[20:16, 6]. */
+ /* Manually set: typ = 5, otherwise typ = 7. */
+ DctSelIntLvAddr = mctGet_NVbits(NV_ChannelIntlv); /* typ = 5: Hash*: exclusive OR of address bits[20:16, 6]. */
if (DctSelIntLvAddr & 1) {
DctSelIntLvAddr >>= 1;
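
The hash option referenced above (DctSelIntLvAddr type 5) derives the target DCT from the exclusive OR of address bits [20:16] and bit 6, so traffic alternates between the two channels at both 64-byte and 64 KiB granularity. A minimal sketch of that selection (function name illustrative):

#include <stdint.h>

/* Channel select = XOR of address bits [20:16] and bit 6 (0 -> DCT0, 1 -> DCT1). */
static unsigned int hash_channel_select(uint64_t addr)
{
	unsigned int sel = (addr >> 6) & 1;
	int bit;

	for (bit = 16; bit <= 20; bit++)
		sel ^= (addr >> bit) & 1;
	return sel;
}
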
@@ -67,7 +67,7 @@ void InterleaveChannels_D(struct MCTStatStruc *pMCTstat,
if (dct1_size == dct0_size) {
dct1_size = 0;
DctSelHi = 0x04; /* DctSelHiRngEn = 0 */
- } else if (dct1_size > dct0_size ) {
+ } else if (dct1_size > dct0_size) {
dct1_size = dct0_size;
DctSelHi = 0x07; /* DctSelHiRngEn = 1, DctSelHi = 1 */
}
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
index 3f56765e4e..e42a127e33 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c
@@ -45,7 +45,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
while (DoIntlv && (ChipSel < MAX_CS_SUPPORTED)) {
reg = 0x40+(ChipSel<<2); /* Dram CS Base 0 */
val = Get_NB32_DCT(dev, dct, reg);
- if ( val & (1<<CSEnable)) {
+ if (val & (1<<CSEnable)) {
EnChipSels++;
reg = 0x60+((ChipSel>>1)<<2); /*Dram CS Mask 0 */
val = Get_NB32_DCT(dev, dct, reg);
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
index 06a70e6ebb..71a4b79424 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c
@@ -247,12 +247,12 @@ static void SetEccDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
u8 channel;
u8 direction;
- for (channel = 0; channel < 2; channel++){
+ for (channel = 0; channel < 2; channel++) {
for (direction = 0; direction < 2; direction++) {
pDCTstat->Channel = channel; /* Channel A or B */
pDCTstat->Direction = direction; /* Read or write */
CalcEccDQSPos_D(pMCTstat, pDCTstat, pDCTstat->CH_EccDQSLike[channel], pDCTstat->CH_EccDQSScale[channel], ChipSel);
- print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction==DQS_READDIR? " R dqs_delay":" W dqs_delay", pDCTstat->DQSDelay, 2);
+ print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, direction == DQS_READDIR? " R dqs_delay":" W dqs_delay", pDCTstat->DQSDelay, 2);
pDCTstat->ByteLane = 8;
StoreDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
mct_SetDQSDelayCSR_D(pMCTstat, pDCTstat, ChipSel);
@@ -294,7 +294,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
GetDQSDatStrucVal_D(pMCTstat, pDCTstat, ChipSel);
DQSDelay1 = pDCTstat->DQSDelay;
- if (DQSDelay0>DQSDelay1) {
+ if (DQSDelay0 > DQSDelay1) {
DQSDelay = DQSDelay0 - DQSDelay1;
} else {
DQSDelay = DQSDelay1 - DQSDelay0;
@@ -306,7 +306,7 @@ static void CalcEccDQSPos_D(struct MCTStatStruc *pMCTstat,
DQSDelay >>= 8; /* 256 */
- if (DQSDelay0>DQSDelay1) {
+ if (DQSDelay0 > DQSDelay1) {
DQSDelay = DQSDelay1 - DQSDelay;
} else {
DQSDelay += DQSDelay1;
@@ -493,7 +493,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
}
print_debug_dqs("\t\t\t\tTrainDQSRdWrPos: 14 TestAddr ", TestAddr, 4);
- SetUpperFSbase(TestAddr); /* fs:eax=far ptr to target */
+ SetUpperFSbase(TestAddr); /* fs:eax = far ptr to target */
print_debug_dqs("\t\t\t\tTrainDQSRdWrPos: 12 Receiver ", Receiver, 2);
@@ -556,7 +556,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
ResetTargetWTIO_D();
/* Read and compare pattern */
- bytelane_test_results &= (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0=fail, 1=pass */
+ bytelane_test_results &= (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0 = fail, 1 = pass */
/* If all lanes have already failed testing bypass remaining re-read attempt(s) */
if (bytelane_test_results == 0x0)
@@ -650,7 +650,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
ResetTargetWTIO_D();
/* Read and compare pattern from the base test address */
- bytelane_test_results = (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0=fail, 1=pass */
+ bytelane_test_results = (CompareDQSTestPattern_D(pMCTstat, pDCTstat, TestAddr << 8) & 0xff); /* [Lane 7 :: Lane 0] 0 = fail, 1 = pass */
/* Store any lanes that passed testing for later use */
for (lane = 0; lane < 8; lane++)
@@ -814,7 +814,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
- for (i=0;i<8; i++) {
+ for (i = 0; i < 8; i++) {
val = p[i];
printk(BIOS_DEBUG, " %02x", val);
}
@@ -834,7 +834,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat,
lo &= ~(1<<17); /* restore HWCR.wrap32dis */
_WRMSR(addr, lo, hi);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -1583,7 +1583,7 @@ static uint8_t TrainDQSRdWrPos_D_Fam15(struct MCTStatStruc *pMCTstat,
for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
- for (i=0;i<8; i++) {
+ for (i = 0; i < 8; i++) {
val = p[i];
printk(BIOS_DEBUG, " %02x", val);
}
@@ -1843,7 +1843,7 @@ static void TrainDQSReceiverEnCyc_D_Fam15(struct MCTStatStruc *pMCTstat,
for (ReceiverDTD = 0; ReceiverDTD < MAX_CS_SUPPORTED; ReceiverDTD += 2) {
printk(BIOS_DEBUG, "\t\tReceiver: %02x:", ReceiverDTD);
p = pDCTstat->CH_D_DIR_B_DQS[ChannelDTD][ReceiverDTD >> 1][Dir];
- for (i=0;i<8; i++) {
+ for (i = 0; i < 8; i++) {
val = p[i];
printk(BIOS_DEBUG, " %02x", val);
}
@@ -1863,7 +1863,7 @@ static void TrainDQSReceiverEnCyc_D_Fam15(struct MCTStatStruc *pMCTstat,
lo &= ~(1<<17); /* restore HWCR.wrap32dis */
_WRMSR(addr, lo, hi);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -1890,11 +1890,11 @@ static void SetupDqsPattern_D(struct MCTStatStruc *pMCTstat,
buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0));
if (pDCTstat->Status & (1<<SB_128bitmode)) {
pDCTstat->Pattern = 1; /* 18 cache lines, alternating qwords */
- for (i=0; i<16*18; i++)
+ for (i = 0; i < 16*18; i++)
buf[i] = TestPatternJD1b_D[i];
} else {
pDCTstat->Pattern = 0; /* 9 cache lines, sequential qwords */
- for (i=0; i<16*9; i++)
+ for (i = 0; i < 16*9; i++)
buf[i] = TestPatternJD1a_D[i];
}
pDCTstat->PtrPatternBufA = (u32)buf;
@@ -1966,10 +1966,10 @@ static u8 ChipSelPresent_D(struct MCTStatStruc *pMCTstat,
else
dct = 0;
- if (ChipSel < MAX_CS_SUPPORTED){
+ if (ChipSel < MAX_CS_SUPPORTED) {
reg = 0x40 + (ChipSel << 2);
val = Get_NB32_DCT(dev, dct, reg);
- if (val & ( 1 << 0))
+ if (val & (1 << 0))
ret = 1;
}
@@ -2058,7 +2058,7 @@ static u16 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStat
}
bytelane = 0; /* bytelane counter */
- bitmap = 0xFFFF; /* bytelane test bitmap, 1=pass */
+ bitmap = 0xFFFF; /* bytelane test bitmap, 1 = pass */
MEn1Results = 0xFFFF;
BeatCnt = 0;
for (i = 0; i < (9 * 64 / 4); i++) { /* sizeof testpattern. /4 due to next loop */
@@ -2102,7 +2102,7 @@ static u16 CompareDQSTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStat
if (!bitmap)
break;
- if (bytelane == 0){
+ if (bytelane == 0) {
BeatCnt += 4;
if (!(pDCTstat->Status & (1 << SB_128bitmode))) {
if (BeatCnt == 8) BeatCnt = 0; /* 8 beat burst */
@@ -2132,7 +2132,7 @@ static void FlushDQSTestPattern_D(struct DCTStatStruc *pDCTstat,
u32 addr_lo)
{
/* Flush functions in mct_gcc.h */
- if (pDCTstat->Pattern == 0){
+ if (pDCTstat->Pattern == 0) {
FlushDQSTestPattern_L9(addr_lo);
} else {
FlushDQSTestPattern_L18(addr_lo);
@@ -2349,9 +2349,9 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
val &= ~0xe007c01f;
- /* unganged mode DCT0+DCT1, sys addr of DCT1=node
+ /* unganged mode DCT0+DCT1, sys addr of DCT1 = node
* base+DctSelBaseAddr+local ca base*/
- if ((Channel) && (pDCTstat->GangedMode == 0) && ( pDCTstat->DIMMValidDCT[0] > 0)) {
+ if ((Channel) && (pDCTstat->GangedMode == 0) && (pDCTstat->DIMMValidDCT[0] > 0)) {
reg = 0x110;
dword = Get_NB32(dev, reg);
dword &= 0xfffff800;
@@ -2365,7 +2365,7 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat,
val += dword;
}
} else {
- /* sys addr=node base+local cs base */
+ /* sys addr = node base+local cs base */
val += pDCTstat->DCTSysBase;
/* New stuff */
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
index 5d31849fb4..ca3678997b 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c
@@ -36,7 +36,7 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat);
*
* Conditions for setting background scrubber.
* 1. node is present
- * 2. node has dram functioning (WE=RE=1)
+ * 2. node has dram functioning (WE = RE = 1)
* 3. all eccdimms (or bit 17 of offset 90,fn 2)
* 4. no chip-select gap exists
*
@@ -152,10 +152,10 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
val = Get_NB32(dev, reg);
/* WE/RE is checked */
- if ((val & 3)==3) { /* Node has dram populated */
+ if ((val & 3) == 3) { /* Node has dram populated */
/* Negate 'all nodes/dimms ECC' flag if non ecc
memory populated */
- if ( pDCTstat->Status & (1<<SB_ECCDIMMs)) {
+ if (pDCTstat->Status & (1 << SB_ECCDIMMs)) {
LDramECC = isDramECCEn_D(pDCTstat);
if (pDCTstat->ErrCode != SC_RunningOK) {
pDCTstat->Status &= ~(1 << SB_ECCDIMMs);
@@ -195,9 +195,9 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
}
if (AllECC)
- pMCTstat->GStatus |= 1<<GSB_ECCDIMMs;
+ pMCTstat->GStatus |= 1 << GSB_ECCDIMMs;
else
- pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs);
+ pMCTstat->GStatus &= ~(1 << GSB_ECCDIMMs);
/* Program the Dram BKScrub CTL to the proper (user selected) value.*/
/* Reset MC4_STS. */
@@ -206,11 +206,11 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
pDCTstat = pDCTstatA + Node;
LDramECC = 0;
if (NodePresent_D(Node)) { /* If Node is present */
- reg = 0x40+(Node<<3); /* Dram Base Node 0 + index */
+ reg = 0x40+(Node << 3); /* Dram Base Node 0 + index */
val = Get_NB32(pDCTstat->dev_map, reg);
curBase = val & 0xffff0000;
/*WE/RE is checked because memory config may have been */
- if ((val & 3)==3) { /* Node has dram populated */
+ if ((val & 3) == 3) { /* Node has dram populated */
if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */
dev = pDCTstat->dev_nbmisc;
val = curBase << 8;
@@ -322,16 +322,16 @@ static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat;
pDCTstat = pDCTstatA + Node;
if (NodePresent_D(Node)) { /* If Node is present*/
- reg = 0x40+(Node<<3); /* Dram Base Node 0 + index*/
+ reg = 0x40+(Node << 3); /* Dram Base Node 0 + index*/
val = Get_NB32(pDCTstat->dev_map, reg);
/*WE/RE is checked because memory config may have been*/
- if ((val & 3)==3) { /* Node has dram populated*/
- if ( isDramECCEn_D(pDCTstat)) {
+ if ((val & 3) == 3) { /* Node has dram populated*/
+ if (isDramECCEn_D(pDCTstat)) {
/*if ECC is enabled on this dram*/
dev = pDCTstat->dev_nbmisc;
reg = 0x44; /* MCA NB Configuration*/
val = Get_NB32(dev, reg);
- val |= (1<<SyncOnUcEccEn);
+ val |= (1 << SyncOnUcEccEn);
Set_NB32(dev, reg, val);
}
} /* Node has Dram*/
@@ -353,11 +353,11 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
} else {
ch_end = 2;
}
- for (i=0; i<ch_end; i++) {
- if (pDCTstat->DIMMValidDCT[i] > 0){
+ for (i = 0; i < ch_end; i++) {
+ if (pDCTstat->DIMMValidDCT[i] > 0) {
reg = 0x90; /* Dram Config Low */
val = Get_NB32_DCT(dev, i, reg);
- if (val & (1<<DimmEcEn)) {
+ if (val & (1 << DimmEcEn)) {
/* set local flag 'dram ecc capable' */
isDimmECCEn = 1;
break;
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
index 8ed2befb34..558b3e3498 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c
@@ -34,11 +34,11 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
/* Set temporary top of memory from Node structure data.
* Adjust temp top of memory down to accommodate 32-bit IO space.
- * Bottom40bIO=top of memory, right justified 8 bits
+ * Bottom40bIO = top of memory, right justified 8 bits
* (defines dram versus IO space type)
- * Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+ * Bottom32bIO = sub 4GB top of memory, right justified 8 bits
* (defines dram versus IO space type)
- * Cache32bTOP=sub 4GB top of WB cacheable memory,
+ * Cache32bTOP = sub 4GB top of WB cacheable memory,
* right justified 8 bits
*/
@@ -82,8 +82,8 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
*/
addr = 0x204; /* MTRR phys base 2*/
/* use TOP_MEM as limit*/
- /* Limit=TOP_MEM|TOM2*/
- /* Base=0*/
+ /* Limit = TOP_MEM|TOM2*/
+ /* Base = 0*/
printk(BIOS_DEBUG, "\t CPUMemTyping: Cache32bTOP:%x\n", Cache32bTOP);
SetMTRRrangeWB_D(0, &Cache32bTOP, &addr);
/* Base */
@@ -112,10 +112,10 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat,
addr = 0xC0010010; /* SYS_CFG */
_RDMSR(addr, &lo, &hi);
if (Bottom40bIO) {
- lo |= (1<<21); /* MtrrTom2En=1 */
+ lo |= (1<<21); /* MtrrTom2En = 1 */
lo |= (1<<22); /* Tom2ForceMemTypeWB */
} else {
- lo &= ~(1<<21); /* MtrrTom2En=0 */
+ lo &= ~(1<<21); /* MtrrTom2En = 0 */
lo &= ~(1<<22); /* Tom2ForceMemTypeWB */
}
_WRMSR(addr, lo, hi);
@@ -146,7 +146,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
* next set bit in a forward or backward sequence of bits (as a function
* of the Limit). We start with the ascending path, to ensure that
* regions are naturally aligned, then we switch to the descending path
- * to maximize MTRR usage efficiency. Base=0 is a special case where we
+ * to maximize MTRR usage efficiency. Base = 0 is a special case where we
* start with the descending path. Correct Mask for region is
* 2comp(Size-1)-1, which is 2comp(Limit-Base-1)-1
*/
@@ -172,17 +172,17 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType)
curSize = valx;
valx += curBase;
}
- curLimit = valx; /*eax=curBase, edx=curLimit*/
+ curLimit = valx; /*eax = curBase, edx = curLimit*/
valx = val>>24;
val <<= 8;
/* now program the MTRR */
val |= MtrrType; /* set cache type (UC or WB)*/
_WRMSR(addr, val, valx); /* prog. MTRR with current region Base*/
- val = ((~(curSize - 1))+1) - 1; /* Size-1*/ /*Mask=2comp(Size-1)-1*/
+ val = ((~(curSize - 1))+1) - 1; /* Size-1*/ /*Mask = 2comp(Size-1)-1*/
valx = (val >> 24) | (0xff00); /* GH have 48 bits addr */
val <<= 8;
- val |= ( 1 << 11); /* set MTRR valid*/
+ val |= (1 << 11); /* set MTRR valid*/
addr++;
_WRMSR(addr, val, valx); /* prog. MTRR with current region Mask*/
val = curLimit;
@@ -213,9 +213,9 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
/*======================================================================
* Adjust temp top of memory down to accommodate UMA memory start
*======================================================================*/
- /* Bottom32bIO=sub 4GB top of memory, right justified 8 bits
+ /* Bottom32bIO = sub 4GB top of memory, right justified 8 bits
* (defines dram versus IO space type)
- * Cache32bTOP=sub 4GB top of WB cacheable memory, right justified 8 bits */
+ * Cache32bTOP = sub 4GB top of WB cacheable memory, right justified 8 bits */
Bottom32bIO = pMCTstat->Sub4GCacheTop >> 8;
@@ -234,7 +234,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat
addr = 0x200;
lo = 0;
hi = lo;
- while ( addr < 0x20C) {
+ while (addr < 0x20C) {
_WRMSR(addr, lo, hi); /* prog. MTRR with current region Mask */
addr++; /* next MTRR pair addr */
}
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
index 9a769adbaf..bf84171255 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c
@@ -68,7 +68,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
NodesWmem++;
Base &= 0xFFFF0000; /* Base[39:8] */
- if (pDCTstat->Status & (1 << SB_HWHole )) {
+ if (pDCTstat->Status & (1 << SB_HWHole)) {
/* to get true amount of dram,
* subtract out memory hole if HW dram remapping */
@@ -83,7 +83,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114);
if (DctSelBase) {
DctSelBase <<= 8;
- if ( pDCTstat->Status & (1 << SB_HWHole)) {
+ if (pDCTstat->Status & (1 << SB_HWHole)) {
if (DctSelBase >= 0x1000000) {
DctSelBase -= HWHoleSz;
}
@@ -100,7 +100,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
MemSize &= 0xFFFF0000;
MemSize += 0x00010000;
MemSize -= Base;
- if ( pDCTstat->Status & (1 << SB_HWHole)) {
+ if (pDCTstat->Status & (1 << SB_HWHole)) {
MemSize -= HWHoleSz;
}
if (Node == 0) {
@@ -139,7 +139,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
if (DoIntlv) {
MCTMemClr_D(pMCTstat, pDCTstatA);
/* Program Interleaving enabled on Node 0 map only.*/
- MemSize0 <<= bsf(Nodes); /* MemSize=MemSize*2 (or 4, or 8) */
+ MemSize0 <<= bsf(Nodes); /* MemSize = MemSize*2 (or 4, or 8) */
Dct0MemSize <<= bsf(Nodes);
MemSize0 += HWHoleSz;
Base = ((Nodes - 1) << 8) | 3;
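
Node interleaving only supports power-of-two node counts, so the scaling above is a shift: bsf(Nodes) equals log2(Nodes) for 2, 4 or 8 nodes. A small worked sketch with illustrative numbers (addresses in the [39:8] right-justified units used here):

#include <stdint.h>

/* 4 nodes with 2 GiB each: the interleaved range spans 8 GiB. */
static uint32_t interleaved_limit_example(void)
{
	uint32_t memsize0 = 0x00800000;		/* one node: 2 GiB >> 8 */

	memsize0 <<= 2;				/* bsf(4) = log2(4) = 2 for 4 nodes */
	return memsize0;			/* 0x02000000 = 8 GiB >> 8 */
}

The DRAM base value ((Nodes - 1) << 8) | 3 written above then carries the interleave-enable encoding for that node count alongside the RE/WE bits checked elsewhere in this code; that field placement is stated here as an assumption from the AMD BKDG, not something this diff shows.
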
@@ -180,7 +180,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat,
HoleBase = pMCTstat->HoleBase;
if (Dct0MemSize >= HoleBase) {
val = HWHoleSz;
- if ( Node == 0) {
+ if (Node == 0) {
val += Dct0MemSize;
}
} else {
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c b/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
index 951a71265e..ac24c6d8cc 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctrci.c
@@ -336,14 +336,14 @@ void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
printk(BIOS_SPEW, "%s: F2xA8: %08x\n", __func__, val);
if (is_fam15h()) {
- for (cw=0; cw <=15; cw ++) {
+ for (cw = 0; cw <=15; cw ++) {
val = mct_ControlRC(pMCTstat, pDCTstat, dct, MrsChipSel << rc_word_chip_select_lower_bit(), cw);
mct_SendCtrlWrd(pMCTstat, pDCTstat, dct, val);
if ((cw == 2) || (cw == 8) || (cw == 10))
precise_ndelay_fam15(pMCTstat, 6000);
}
} else {
- for (cw=0; cw <=15; cw ++) {
+ for (cw = 0; cw <=15; cw ++) {
mct_Wait(1600);
val = mct_ControlRC(pMCTstat, pDCTstat, dct, MrsChipSel << rc_word_chip_select_lower_bit(), cw);
mct_SendCtrlWrd(pMCTstat, pDCTstat, dct, val);
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
index 670d640a6c..18af172e7c 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsdi.c
@@ -912,7 +912,7 @@ static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
/* program MrsAddress[11]=TDQS: based on F2x[1,0]94[RDqsEn] */
if (Get_NB32_DCT(dev, dct, 0x94) & (1 << RDqsEn)) {
u8 bit;
- /* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+ /* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
bit = (ret >> 21) << 1;
if ((dct & 1) != 0)
bit ++;
@@ -1063,7 +1063,7 @@ static void mct_SendZQCmd(struct DCTStatStruc *pDCTstat, u8 dct)
printk(BIOS_DEBUG, "%s: Start\n", __func__);
/*1.Program MrsAddress[10]=1
- 2.Set SendZQCmd=1
+ 2.Set SendZQCmd = 1
*/
dword = Get_NB32_DCT(dev, dct, 0x7C);
dword &= ~0xFFFFFF;
@@ -1071,7 +1071,7 @@ static void mct_SendZQCmd(struct DCTStatStruc *pDCTstat, u8 dct)
dword |= 1 << SendZQCmd;
Set_NB32_DCT(dev, dct, 0x7C, dword);
- /* Wait for SendZQCmd=0 */
+ /* Wait for SendZQCmd = 0 */
do {
dword = Get_NB32_DCT(dev, dct, 0x7C);
} while (dword & (1 << SendZQCmd));
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
index 802417971f..eac201346d 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c
@@ -76,7 +76,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat,
p_A = (u32 *)SetupDqsPattern_1PassB(pass);
p_B = (u32 *)SetupDqsPattern_1PassA(pass);
- for (i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
buf_a[i] = p_A[i];
buf_b[i] = p_B[i];
}
@@ -560,7 +560,7 @@ static uint32_t convert_testaddr_and_channel_to_address(struct DCTStatStruc *pDC
SetUpperFSbase(testaddr);
testaddr <<= 8;
- if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) {
+ if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel) {
testaddr += 8; /* second channel */
}
@@ -636,7 +636,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
}
cr4 = read_cr4();
- if (cr4 & ( 1 << 9)) { /* save the old value */
+ if (cr4 & (1 << 9)) { /* save the old value */
_SSE2 = 1;
}
cr4 |= (1 << 9); /* OSFXSR enable SSE2 */
@@ -986,7 +986,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
msr.lo &= ~(1<<17); /* restore HWCR.wrap32dis */
wrmsr(HWCR, msr);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -996,7 +996,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
{
u8 ChannelDTD;
printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n");
- for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+ for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel:%x: %x\n",
ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
}
@@ -1013,10 +1013,10 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat,
printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n");
for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel:%x\n", ChannelDTD);
- for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) {
+ for (ReceiverDTD = 0; ReceiverDTD < 8; ReceiverDTD+=2) {
printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD);
p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1];
- for (i=0;i<8; i++) {
+ for (i = 0; i < 8; i++) {
valDTD = p[i];
printk(BIOS_DEBUG, " %03x", valDTD);
}
@@ -1246,7 +1246,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
}
cr4 = read_cr4();
- if (cr4 & ( 1 << 9)) { /* save the old value */
+ if (cr4 & (1 << 9)) { /* save the old value */
_SSE2 = 1;
}
cr4 |= (1 << 9); /* OSFXSR enable SSE2 */
@@ -1500,7 +1500,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
lo &= ~(1<<17); /* restore HWCR.wrap32dis */
_WRMSR(msr, lo, hi);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -1510,7 +1510,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
{
u8 ChannelDTD;
printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n");
- for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+ for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel:%x: %x\n",
ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
}
@@ -1527,10 +1527,10 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat,
printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n");
for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel:%x\n", ChannelDTD);
- for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) {
+ for (ReceiverDTD = 0; ReceiverDTD < 8; ReceiverDTD+=2) {
printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD);
p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1];
- for (i=0;i<8; i++) {
+ for (i = 0; i < 8; i++) {
valDTD = p[i];
printk(BIOS_DEBUG, " %03x", valDTD);
}
@@ -1604,7 +1604,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
ch_end = 2;
cr4 = read_cr4();
- if (cr4 & ( 1 << 9)) { /* save the old value */
+ if (cr4 & (1 << 9)) { /* save the old value */
_SSE2 = 1;
}
cr4 |= (1 << 9); /* OSFXSR enable SSE2 */
@@ -1720,7 +1720,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
lo &= ~(1<<17); /* restore HWCR.wrap32dis */
_WRMSR(msr, lo, hi);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -1730,7 +1730,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
{
u8 ChannelDTD;
printk(BIOS_DEBUG, "TrainMaxRdLatency: CH_MaxRdLat:\n");
- for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+ for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel:%x: %x\n",
ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
}
@@ -1745,7 +1745,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat,
u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
{
- if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
+ if (pDCTstat->DIMMValidDCT[dct] == 0) {
return 8;
} else {
return 0;
@@ -1766,7 +1766,7 @@ static void mct_DisableDQSRcvEn_D(struct DCTStatStruc *pDCTstat)
ch_end = 2;
}
- for (ch=0; ch<ch_end; ch++) {
+ for (ch = 0; ch < ch_end; ch++) {
reg = 0x78;
val = Get_NB32_DCT(dev, ch, reg);
val &= ~(1 << DqsRcvEnTrain);
@@ -1800,14 +1800,14 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u16 RcvrEnDly,
}
/* DimmOffset not needed for CH_D_B_RCVRDLY array */
- for (i=0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
if (FinalValue) {
/*calculate dimm offset */
p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
RcvrEnDly = p[i];
}
- /* if flag=0, set DqsRcvEn value to reg. */
+ /* if flag = 0, set DqsRcvEn value to reg. */
/* get the register index from table */
index = Table_DQSRcvEn_Offset[i >> 1];
index += Addl_Index; /* DIMMx DqsRcvEn byte0 */
@@ -1852,7 +1852,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D
uint8_t package_type = mctGet_NVbits(NV_PACK_TYPE);
if ((package_type == PT_L1) /* Socket F (1207) */
|| (package_type == PT_M2) /* Socket AM3 */
- || (package_type == PT_S1)) { /* Socket S1g<x> */
+ || (package_type == PT_S1)) { /* Socket S1g <x> */
cpu_val_n = 10;
cpu_val_p = 11;
} else {
@@ -1950,7 +1950,7 @@ static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
* Read Position is 1/2 Memclock Delay
*/
u8 i;
- for (i=0;i<2; i++){
+ for (i = 0; i < 2; i++) {
InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i);
}
}
@@ -1972,8 +1972,8 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
/* FIXME: add Cx support */
dword = 0x00000000;
- for (i=1; i<=3; i++) {
- for (j=0; j<dn; j++)
+ for (i = 1; i <= 3; i++) {
+ for (j = 0; j < dn; j++)
/* DIMM0 Write Data Timing Low */
/* DIMM0 Write ECC Timing */
Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword);
@@ -1981,14 +1981,14 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat,
/* errata #180 */
dword = 0x2f2f2f2f;
- for (i=5; i<=6; i++) {
- for (j=0; j<dn; j++)
+ for (i = 5; i <= 6; i++) {
+ for (j = 0; j < dn; j++)
/* DIMM0 Read DQS Timing Control Low */
Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword);
}
dword = 0x0000002f;
- for (j=0; j<dn; j++)
+ for (j = 0; j < dn; j++)
/* DIMM0 Read DQS ECC Timing Control */
Set_NB32_index_wait_DCT(dev, Channel, index_reg, 7 + 0x100 * j, dword);
}
@@ -2087,7 +2087,7 @@ void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat,
if (!pDCTstat->NodePresent)
break;
if (pDCTstat->DCTSysLimit) {
- for (i=0; i<2; i++)
+ for (i = 0; i < 2; i++)
CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i);
}
}
@@ -2427,5 +2427,5 @@ void mct_Wait(u32 cycles)
saved = lo;
do {
_RDMSR(msr, &lo, &hi);
- } while (lo - saved < cycles );
+ } while (lo - saved < cycles);
}
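Reviewer's aside, not part of the patch: mct_Wait(), whose tail is reformatted just above, is a plain elapsed-cycle busy-wait. It samples the low 32 bits of a free-running counter once, then re-reads it until the unsigned difference reaches the requested cycle count; the unsigned subtraction keeps the comparison valid even if the low word rolls over. A hosted-C sketch of the same idea, using the __rdtsc() intrinsic as a stand-in for the firmware's _RDMSR-based counter read:

#include <stdint.h>
#include <x86intrin.h>			/* __rdtsc() */

static void busy_wait_cycles(uint32_t cycles)
{
	uint32_t saved = (uint32_t)__rdtsc();	/* keep only the low 32 bits, like 'lo' above */

	while ((uint32_t)__rdtsc() - saved < cycles)
		;			/* spin until enough cycles have elapsed */
}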
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
index d5357355cd..30cf19ba30 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c
@@ -49,7 +49,7 @@ static u16 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel
MaxValue = 0;
p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1];
- for (i=0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
/* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/
val = p[i];
/* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
index 2f4d4da82b..7f678248c3 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c
@@ -58,7 +58,7 @@ u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
u8 bn;
bn = 8;
- for ( i=0;i<bn; i++) {
+ for (i = 0; i < bn; i++) {
val = p[i];
if (val > max) {
@@ -91,7 +91,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
/* FIXME: which byte? */
p_1 = pDCTstat->B_RCVRDLY_1;
/* p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1]; */
- for (i=0; i<bn; i++) {
+ for (i = 0; i < bn; i++) {
val = p[i];
/* left edge */
if (val != (RcvrEnDlyLimit - 1)) {
@@ -111,7 +111,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat,
pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel));
}
} else {
- for (i=0; i < bn; i++) {
+ for (i = 0; i < bn; i++) {
val = p[i];
/* Add 1/2 Memlock delay */
/* val += Pass1MemClkDly; */
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
index 15eb67e15b..78db68c9d0 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c
@@ -144,7 +144,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1);
pDCTstat->Channel = Channel;
- if ( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
+ if ((pDCTstat->Status & (1 << SB_128bitmode)) && Channel)
break; /*if ganged mode, skip DCT 1 */
TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly, &valid);
@@ -159,7 +159,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
while (MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */
mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly);
ReadMaxRdLat1CLTestPattern_D(TestAddr0);
- if ( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
+ if (CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS)
break;
SetTargetWTIO_D(TestAddr0);
FlushMaxRdLatTestPattern_D(TestAddr0);
@@ -180,7 +180,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
lo &= ~(1<<17); /* restore HWCR.wrap32dis */
_WRMSR(addr, lo, hi);
}
- if (!_SSE2){
+ if (!_SSE2) {
cr4 = read_cr4();
cr4 &= ~(1<<9); /* restore cr4.OSFXSR */
write_cr4(cr4);
@@ -190,7 +190,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat,
{
u8 ChannelDTD;
printk(BIOS_DEBUG, "maxRdLatencyTrain: CH_MaxRdLat:\n");
- for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) {
+ for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) {
printk(BIOS_DEBUG, "Channel: %02x: %02x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]);
}
}
@@ -207,7 +207,7 @@ static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
if (pDCTstat->GangedMode) {
Channel = 0; /* for safe */
- for (i=0; i<2; i++)
+ for (i = 0; i < 2; i++)
pDCTstat->CH_MaxRdLat[i][0] = MaxRdLatVal;
} else {
pDCTstat->CH_MaxRdLat[Channel][0] = MaxRdLatVal;
@@ -239,7 +239,7 @@ static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr)
addr_lo = addr<<8;
_EXECFENCE;
- for (i=0; i<(16*3); i++) {
+ for (i = 0; i < 16*3; i++) {
val = read32_fs(addr_lo);
val_test = test_buf[i];
@@ -284,11 +284,11 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat,
*valid = 0;
for (ch = ch_start; ch < ch_end; ch++) {
- for (d=0; d<4; d++) {
- for (Byte = 0; Byte<bn; Byte++) {
+ for (d = 0; d < 4; d++) {
+ for (Byte = 0; Byte < bn; Byte++) {
u8 tmp;
tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte];
- if (tmp>Max) {
+ if (tmp > Max) {
Max = tmp;
Channel_Max = Channel;
d_Max = d;
@@ -371,7 +371,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
valx = (val) << 2; /* SubTotal div 4 to scale 1/4 MemClk back to MemClk */
val = Get_NB32(pDCTstat->dev_nbmisc, 0xD4);
- val = ((val & 0x1f) + 4 ) * 3;
+ val = ((val & 0x1f) + 4) * 3;
/* Calculate 1 MemClk + 1 NCLK delay in NCLKs for margin */
valxx = val << 2;
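Reviewer's aside, not part of the patch: maxRdLatencyTrain_D(), touched above, trains MaxRdLatency by a linear sweep: program a candidate delay, read back a known test pattern, and stop at the first value that compares clean. Stripped of the chipset plumbing, the control flow is roughly the sketch below; find_max_rd_lat(), apply_delay() and pattern_readback_ok() are illustrative placeholders, not functions from this file:

/* Hypothetical hooks standing in for mct_setMaxRdLatTrnVal_D() and the
 * ReadMaxRdLat1CLTestPattern_D()/CompareMaxRdLatTestPattern_D() pair. */
extern void apply_delay(unsigned int dly);
extern int pattern_readback_ok(void);

static int find_max_rd_lat(unsigned int start, unsigned int max_dly)
{
	unsigned int dly;

	for (dly = start; dly < max_dly; dly++) {
		apply_delay(dly);
		if (pattern_readback_ok())
			return dly;	/* first passing delay wins */
	}
	return -1;			/* no delay in range passed */
}

Per the surrounding code, the real routine seeds the sweep with a starting value derived from the trained receiver-enable delay and flushes and re-targets the test pattern between iterations; only the sweep-until-pass shape is shown here.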
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c b/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
index 44ea6e8f84..47c5004c0c 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mctwl.c
@@ -426,7 +426,7 @@ void SetTargetFreq(struct MCTStatStruc *pMCTstat,
}
}
- /* wait for 500 MCLKs after ExitSelfRef, 500*2.5ns=1250ns */
+ /* wait for 500 MCLKs after ExitSelfRef, 500*2.5ns = 1250ns */
mct_Wait(250);
if (pDCTstat->Status & (1 << SB_Registered)) {
@@ -474,9 +474,9 @@ void Restore_OnDimmMirror(struct MCTStatStruc *pMCTstat,
{
if (pDCTstat->LogicalCPUID & (AMD_DR_Bx /* | AMD_RB_C0 */)) { /* We dont support RB-C0 now */
if (pDCTstat->MirrPresU_NumRegR & 0x55)
- Modify_OnDimmMirror(pDCTstat, 0, 1); /* dct=0, set */
+ Modify_OnDimmMirror(pDCTstat, 0, 1); /* dct = 0, set */
if (pDCTstat->MirrPresU_NumRegR & 0xAA)
- Modify_OnDimmMirror(pDCTstat, 1, 1); /* dct=1, set */
+ Modify_OnDimmMirror(pDCTstat, 1, 1); /* dct = 1, set */
}
}
void Clear_OnDimmMirror(struct MCTStatStruc *pMCTstat,
@@ -484,8 +484,8 @@ void Clear_OnDimmMirror(struct MCTStatStruc *pMCTstat,
{
if (pDCTstat->LogicalCPUID & (AMD_DR_Bx /* | AMD_RB_C0 */)) { /* We dont support RB-C0 now */
if (pDCTstat->MirrPresU_NumRegR & 0x55)
- Modify_OnDimmMirror(pDCTstat, 0, 0); /* dct=0, clear */
+ Modify_OnDimmMirror(pDCTstat, 0, 0); /* dct = 0, clear */
if (pDCTstat->MirrPresU_NumRegR & 0xAA)
- Modify_OnDimmMirror(pDCTstat, 1, 0); /* dct=1, clear */
+ Modify_OnDimmMirror(pDCTstat, 1, 0); /* dct = 1, clear */
}
}
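Reviewer's aside, not part of the patch: in the two Restore/Clear_OnDimmMirror() hunks above, the 0x55/0xAA masks split one mirroring-presence bitmap between the two DRAM controllers; as the dct = 0 and dct = 1 arguments in the calls suggest, the even-numbered bits are handled through DCT0 and the odd-numbered bits through DCT1. Purely illustrative constants, not defined anywhere in this codebase:

#define MIRROR_MASK_DCT0	0x55	/* bits 0, 2, 4, 6 -> Modify_OnDimmMirror(pDCTstat, 0, ...) */
#define MIRROR_MASK_DCT1	0xAA	/* bits 1, 3, 5, 7 -> Modify_OnDimmMirror(pDCTstat, 1, ...) */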
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
index 5c30bc554c..dce6212996 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c
@@ -140,15 +140,15 @@ uint8_t AgesaHwWlPhase1(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCT
*/
if (dct)
{
- Addl_Data_Offset=0x198;
- Addl_Data_Port=0x19C;
+ Addl_Data_Offset = 0x198;
+ Addl_Data_Port = 0x19C;
}
else
{
- Addl_Data_Offset=0x98;
- Addl_Data_Port=0x9C;
+ Addl_Data_Offset = 0x98;
+ Addl_Data_Port = 0x9C;
}
- Addr=0x0D00000C;
+ Addr = 0x0D00000C;
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
DctAccessDone, DctAccessDone)) == 0);
@@ -157,7 +157,7 @@ uint8_t AgesaHwWlPhase1(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCT
Value = bitTestReset(Value, 4); /* for x8 only */
Value = bitTestReset(Value, 5); /* for hardware WL training */
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
- Addr=0x4D030F0C;
+ Addr = 0x4D030F0C;
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
DctAccessDone, DctAccessDone)) == 0);
@@ -397,8 +397,8 @@ u32 swapAddrBits_wl(struct DCTStatStruc *pDCTstat, uint8_t dct, uint32_t MRSValu
tempW &= 0x0A8;
tempW1 &= 0x0150;
MRSValue &= 0xFE07;
- MRSValue |= (tempW<<1);
- MRSValue |= (tempW1>>1);
+ MRSValue |= (tempW << 1);
+ MRSValue |= (tempW1 >> 1);
}
}
return MRSValue;
@@ -438,8 +438,8 @@ u32 swapBankBits(struct DCTStatStruc *pDCTstat, uint8_t dct, u32 MRSValue)
tempW &= 0x01;
tempW1 &= 0x02;
MRSValue = 0;
- MRSValue |= (tempW<<1);
- MRSValue |= (tempW1>>1);
+ MRSValue |= (tempW << 1);
+ MRSValue |= (tempW1 >> 1);
}
}
return MRSValue;
@@ -453,22 +453,22 @@ static uint16_t unbuffered_dimm_nominal_termination_emrs(uint8_t number_of_dimms
if (number_of_dimms == 1) {
if (MaxDimmsInstallable < 3) {
- term = 0x04; /* Rtt_Nom=RZQ/4=60 Ohm */
+ term = 0x04; /* Rtt_Nom = RZQ/4 = 60 Ohm */
} else {
if (rank_count == 1) {
- term = 0x04; /* Rtt_Nom=RZQ/4=60 Ohm */
+ term = 0x04; /* Rtt_Nom = RZQ/4 = 60 Ohm */
} else {
if (rank == 0)
- term = 0x04; /* Rtt_Nom=RZQ/4=60 Ohm */
+ term = 0x04; /* Rtt_Nom = RZQ/4 = 60 Ohm */
else
- term = 0x00; /* Rtt_Nom=OFF */
+ term = 0x00; /* Rtt_Nom = OFF */
}
}
} else {
if (frequency_index < 5)
- term = 0x0044; /* Rtt_Nom=RZQ/6=40 Ohm */
+ term = 0x0044; /* Rtt_Nom = RZQ/6 = 40 Ohm */
else
- term = 0x0204; /* Rtt_Nom=RZQ/8=30 Ohm */
+ term = 0x0204; /* Rtt_Nom = RZQ/8 = 30 Ohm */
}
return term;
@@ -482,15 +482,15 @@ static uint16_t unbuffered_dimm_dynamic_termination_emrs(uint8_t number_of_dimms
if (number_of_dimms == 1) {
if (MaxDimmsInstallable < 3) {
- term = 0x00; /* Rtt_WR=off */
+ term = 0x00; /* Rtt_WR = off */
} else {
if (rank_count == 1)
- term = 0x00; /* Rtt_WR=off */
+ term = 0x00; /* Rtt_WR = off */
else
- term = 0x200; /* Rtt_WR=RZQ/4=60 Ohm */
+ term = 0x200; /* Rtt_WR = RZQ/4 = 60 Ohm */
}
} else {
- term = 0x400; /* Rtt_WR=RZQ/2=120 Ohm */
+ term = 0x400; /* Rtt_WR = RZQ/2 = 120 Ohm */
}
return term;
@@ -558,7 +558,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
tempW = mct_MR1(pMCTstat, pDCTstat, dct, dimm*2+rank) & 0xffff;
tempW &= ~(0x0244);
} else {
- /* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+ /* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
tempW2 = get_Bits(pDCTData, dct, pDCTData->NodeId,
FUN_DCT, DRAM_CONFIG_HIGH, RDqsEn, RDqsEn);
if (tempW2)
@@ -618,7 +618,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
}
/* Apply Rtt_Nom to the MRS control word */
- tempW=tempW|tempW1;
+ tempW = tempW|tempW1;
/* All ranks of the target DIMM are set to write levelization mode. */
if (wl)
@@ -702,8 +702,8 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
{tempW = bitTestSet(tempW, 7);}
if (bitTest(tempW1,18))
{tempW = bitTestSet(tempW, 6);}
- /* tempW=tempW|(((tempW1>>20)&0x7)<<3); */
- tempW=tempW|((tempW1&0x00700000)>>17);
+ /* tempW = tempW|(((tempW1 >> 20) & 0x7) << 3); */

+ tempW = tempW|((tempW1&0x00700000) >> 17);
/* workaround for DR-B0 */
if ((pDCTData->LogicalCPUID & AMD_DR_Bx) && (pDCTData->Status[DCT_STATUS_REGISTERED]))
tempW+=0x8;
@@ -720,7 +720,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
}
/* Apply Rtt_WR to the MRS control word */
- tempW=tempW|tempW1;
+ tempW = tempW|tempW1;
tempW = swapAddrBits_wl(pDCTstat, dct, tempW);
if (is_fam15h())
set_Bits(pDCTData, dct, pDCTData->NodeId, FUN_DCT,
@@ -779,14 +779,14 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
/* Program F2x[1, 0]7C[MrsAddress[15:0]] to the required
* DDR3-defined function for write levelization.
*/
- tempW = 0;/* DLL_DIS = 0, DIC = 0, AL = 0, TDQS = 0, Level=0, Qoff=0 */
+ tempW = 0;/* DLL_DIS = 0, DIC = 0, AL = 0, TDQS = 0, Level = 0, Qoff = 0 */
/* Retrieve normal settings of the MRS control word and clear Rtt_Nom */
if (is_fam15h()) {
tempW = mct_MR1(pMCTstat, pDCTstat, dct, currDimm*2+rank) & 0xffff;
tempW &= ~(0x0244);
} else {
- /* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
+ /* Set TDQS = 1b for x8 DIMM, TDQS = 0b for x4 DIMM, when mixed x8 & x4 */
tempW2 = get_Bits(pDCTData, dct, pDCTData->NodeId,
FUN_DCT, DRAM_CONFIG_HIGH, RDqsEn, RDqsEn);
if (tempW2)
@@ -811,7 +811,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
}
/* Apply Rtt_Nom to the MRS control word */
- tempW=tempW|tempW1;
+ tempW = tempW|tempW1;
/* Program MrsAddress[5,1]=output driver impedance control (DIC) */
if (is_fam15h()) {
@@ -877,8 +877,8 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
{tempW = bitTestSet(tempW, 7);}
if (bitTest(tempW1,18))
{tempW = bitTestSet(tempW, 6);}
- /* tempW=tempW|(((tempW1>>20)&0x7)<<3); */
- tempW=tempW|((tempW1&0x00700000)>>17);
+ /* tempW = tempW|(((tempW1 >> 20) & 0x7) << 3); */
+ tempW = tempW|((tempW1&0x00700000) >> 17);
/* workaround for DR-B0 */
if ((pDCTData->LogicalCPUID & AMD_DR_Bx) && (pDCTData->Status[DCT_STATUS_REGISTERED]))
tempW+=0x8;
@@ -895,7 +895,7 @@ void prepareDimms(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat,
}
/* Apply Rtt_WR to the MRS control word */
- tempW=tempW|tempW1;
+ tempW = tempW|tempW1;
tempW = swapAddrBits_wl(pDCTstat, dct, tempW);
if (is_fam15h())
set_Bits(pDCTData, dct, pDCTData->NodeId, FUN_DCT,
@@ -939,7 +939,7 @@ void programODT(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
sMCTStruct *pMCTData = pDCTstat->C_MCTPtr;
sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[dct];
- u8 WrLvOdt1=0;
+ u8 WrLvOdt1 = 0;
if (is_fam15h()) {
/* On Family15h processors, the value for the specific CS being targetted
@@ -954,7 +954,7 @@ void programODT(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
cs = (dimm * 2) + rank;
/* Fetch preprogammed ODT pattern from configuration registers */
- dword = Get_NB32_DCT(pDCTstat->dev_dct, dct, ((cs>3)?0x23c:0x238));
+ dword = Get_NB32_DCT(pDCTstat->dev_dct, dct, ((cs > 3)?0x23c:0x238));
if ((cs == 7) || (cs == 3))
WrLvOdt1 = ((dword >> 24) & 0xf);
else if ((cs == 6) || (cs == 2))
@@ -1045,25 +1045,25 @@ void procConfig(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, ui
}
else
{
- /* Program WrLvOdtEn=1 through set bit 12 of D3CSODT reg offset 0 for Rev.B */
+ /* Program WrLvOdtEn = 1 through set bit 12 of D3CSODT reg offset 0 for Rev.B */
if (dct)
{
- Addl_Data_Offset=0x198;
- Addl_Data_Port=0x19C;
+ Addl_Data_Offset = 0x198;
+ Addl_Data_Port = 0x19C;
}
else
{
- Addl_Data_Offset=0x98;
- Addl_Data_Port=0x9C;
+ Addl_Data_Offset = 0x98;
+ Addl_Data_Port = 0x9C;
}
- Addr=0x0D008000;
+ Addr = 0x0D008000;
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
DctAccessDone, DctAccessDone)) == 0);
AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
Value = bitTestSet(Value, 12);
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Port), 31, 0, &Value);
- Addr=0x4D088F00;
+ Addr = 0x4D088F00;
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+(pDCTData->NodeId),FUN_DCT,Addl_Data_Offset), 31, 0, &Addr);
while ((get_Bits(pDCTData,FUN_DCT,pDCTData->NodeId, FUN_DCT, Addl_Data_Offset,
DctAccessDone, DctAccessDone)) == 0);
@@ -1371,7 +1371,7 @@ void setWLByteDelay(struct DCTStatStruc *pDCTstat, uint8_t dct, u8 ByteLane, u8
while (ByteLane < lane_count)
{
/* This subtract 0xC workaround might be temporary. */
- if ((pDCTData->WLPass==2) && (pDCTData->RegMan1Present & (1<<(dimm*2+dct)))) {
+ if ((pDCTData->WLPass == 2) && (pDCTData->RegMan1Present & (1 << (dimm*2+dct)))) {
tempW = (pDCTData->WLGrossDelay[index+ByteLane] << 5) | pDCTData->WLFineDelay[index+ByteLane];
tempW -= 0xC;
pDCTData->WLGrossDelay[index+ByteLane] = (u8)(tempW >> 5);
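Reviewer's aside, not part of the patch: the unbuffered_dimm_nominal_termination_emrs() and unbuffered_dimm_dynamic_termination_emrs() hunks earlier in this file return raw DDR3 mode-register bit patterns, and their Ohm comments follow from the JEDEC Rtt encodings (RZQ = 240 Ohm). My reading of how the magic constants decode, written as illustrative defines that do not exist in this codebase:

/* DDR3 MR1 Rtt_Nom is encoded in address bits A9/A6/A2; MR2 Rtt_WR in A10:A9. */
#define MR1_RTT_NOM_RZQ4	0x0004	/* A2 only     -> RZQ/4 = 60 Ohm  */
#define MR1_RTT_NOM_RZQ6	0x0044	/* A6 | A2     -> RZQ/6 = 40 Ohm  */
#define MR1_RTT_NOM_RZQ8	0x0204	/* A9 | A2     -> RZQ/8 = 30 Ohm  */
#define MR2_RTT_WR_OFF		0x0000	/* A10:A9 = 00 -> dynamic ODT off */
#define MR2_RTT_WR_RZQ4		0x0200	/* A10:A9 = 01 -> RZQ/4 = 60 Ohm  */
#define MR2_RTT_WR_RZQ2		0x0400	/* A10:A9 = 10 -> RZQ/2 = 120 Ohm */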
diff --git a/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c b/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
index 6589a39943..97cadcb828 100644
--- a/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
+++ b/src/northbridge/amd/amdmct/mct_ddr3/s3utils.c
@@ -351,12 +351,12 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x11c = pci_read_config32(dev_fn2, 0x11c);
data->f2x1b0 = pci_read_config32(dev_fn2, 0x1b0);
data->f3x44 = pci_read_config32(dev_fn3, 0x44);
- for (i=0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
data->msr0000020[i] = rdmsr_uint64_t(0x00000200 | i);
}
data->msr00000250 = rdmsr_uint64_t(0x00000250);
data->msr00000258 = rdmsr_uint64_t(0x00000258);
- for (i=0; i<8; i++)
+ for (i = 0; i < 8; i++)
data->msr0000026[i] = rdmsr_uint64_t(0x00000260 | (i + 8));
data->msr000002ff = rdmsr_uint64_t(0x000002ff);
data->msrc0010010 = rdmsr_uint64_t(0xc0010010);
@@ -393,7 +393,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x204 = read_config32_dct(dev_fn2, node, channel, 0x204);
data->f2x208 = read_config32_dct(dev_fn2, node, channel, 0x208);
data->f2x20c = read_config32_dct(dev_fn2, node, channel, 0x20c);
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
data->f2x210[i] = read_config32_dct_nbpstate(dev_fn2, node, channel, i, 0x210);
data->f2x214 = read_config32_dct(dev_fn2, node, channel, 0x214);
data->f2x218 = read_config32_dct(dev_fn2, node, channel, 0x218);
@@ -407,7 +407,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x9cx0d0fe003 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe003);
data->f2x9cx0d0fe013 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe013);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_8_0_1f[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f001f | (i << 8));
data->f2x9cx0d0f201f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f201f);
data->f2x9cx0d0f211f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f211f);
@@ -419,11 +419,11 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x9cx0d0fc11f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc11f);
data->f2x9cx0d0fc21f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc21f);
data->f2x9cx0d0f4009 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f4009);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_8_0_02[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0002 | (i << 8));
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_8_0_06[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0006 | (i << 8));
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_8_0_0a[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f000a | (i << 8));
data->f2x9cx0d0f2002 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2002);
@@ -450,7 +450,7 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x9cx0d0fc031 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc031);
data->f2x9cx0d0fc131 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc131);
data->f2x9cx0d0fc231 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc231);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_0_f_31[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0031 | (i << 8));
data->f2x9cx0d0f8021 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8021);
@@ -463,8 +463,8 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x94 = read_config32_dct(dev_fn2, node, channel, 0x94);
/* Stage 6 */
- for (i=0; i<9; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 9; i++)
+ for (j = 0; j < 3; j++)
data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx00 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x00);
data->f2x9cx0a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0a);
@@ -478,33 +478,33 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x9cx0d0fe007 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe007);
/* Stage 10 */
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
data->f2x9cx10[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x10 + i);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
data->f2x9cx20[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x20 + i);
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
data->f2x9cx3_0_0_3_1[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x01 + i) + (0x100 * j));
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
data->f2x9cx3_0_0_7_5[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x05 + i) + (0x100 * j));
data->f2x9cx0d = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_f_0_13[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0013 | (i << 8));
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
data->f2x9cx0d0f0_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0030 | (i << 8));
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
data->f2x9cx0d0f2_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2030 | (i << 8));
- for (i=0; i<2; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
data->f2x9cx0d0f8_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx0d0f812f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f812f);
/* Stage 11 */
if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
data->f2x9cx30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x30 + i);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
data->f2x9cx40[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x40 + i);
}
@@ -599,28 +599,28 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
continue;
/* Restore training parameters */
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x10 + i, data->f2x9cx10[i]);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x20 + i, data->f2x9cx20[i]);
if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x30 + i, data->f2x9cx30[i]);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x40 + i, data->f2x9cx40[i]);
}
/* Restore MaxRdLatency */
if (is_fam15h()) {
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
write_config32_dct_nbpstate(PCI_DEV(0, 0x18 + node, 2), node, channel, i, 0x210, data->f2x210[i]);
} else {
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x78, data->f2x78);
@@ -682,7 +682,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x11c, data->f2x11c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x1b0, data->f2x1b0);
write_config32_dct(PCI_DEV(0, 0x18 + node, 3), node, channel, 0x44, data->f3x44);
- for (i=0; i<16; i++) {
+ for (i = 0; i < 16; i++) {
wrmsr_uint64_t(0x00000200 | i, data->msr0000020[i]);
}
wrmsr_uint64_t(0x00000250, data->msr00000250);
@@ -692,7 +692,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
* destroying CAR while still executing from CAR!
* For now, skip restoration...
*/
- // for (i=0; i<8; i++)
+ // for (i = 0; i < 8; i++)
// wrmsr_uint64_t(0x00000260 | (i + 8), data->msr0000026[i]);
wrmsr_uint64_t(0x000002ff, data->msr000002ff);
wrmsr_uint64_t(0xc0010010, data->msrc0010010);
@@ -760,7 +760,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x204, data->f2x204);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x208, data->f2x208);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x20c, data->f2x20c);
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
write_config32_dct_nbpstate(PCI_DEV(0, 0x18 + node, 2), node, channel, i, 0x210, data->f2x210[i]);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x214, data->f2x214);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x218, data->f2x218);
@@ -773,7 +773,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x240, data->f2x240);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe013, data->f2x9cx0d0fe013);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f001f | (i << 8), data->f2x9cx0d0f0_8_0_1f[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f201f, data->f2x9cx0d0f201f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f211f, data->f2x9cx0d0f211f);
@@ -795,7 +795,7 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc031, data->f2x9cx0d0fc031);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc131, data->f2x9cx0d0fc131);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc231, data->f2x9cx0d0fc231);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0031 | (i << 8), data->f2x9cx0d0f0_0_f_31[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8021, data->f2x9cx0d0f8021);
@@ -899,8 +899,8 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
- for (i=0; i<9; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 9; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x00, data->f2x9cx00);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0a, data->f2x9cx0a);
@@ -920,11 +920,11 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
dword |= (0x3 << 13); /* DisAutoComp, DisablePredriverCal = 1 */
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe003, dword);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0006 | (i << 8), data->f2x9cx0d0f0_8_0_06[i]);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f000a | (i << 8), data->f2x9cx0d0f0_8_0_0a[i]);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0002 | (i << 8), (0x8000 | data->f2x9cx0d0f0_8_0_02[i]));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8006, data->f2x9cx0d0f8006);
@@ -1024,25 +1024,25 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x10 + i, data->f2x9cx10[i]);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x20 + i, data->f2x9cx20[i]);
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
- for (i=0; i<4; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d, data->f2x9cx0d);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0013 | (i << 8), data->f2x9cx0d0f0_f_0_13[i]);
- for (i=0; i<9; i++)
+ for (i = 0; i < 9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0030 | (i << 8), data->f2x9cx0d0f0_f_0_30[i]);
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2030 | (i << 8), data->f2x9cx0d0f2_f_0_30[i]);
- for (i=0; i<2; i++)
- for (j=0; j<3; j++)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f8_8_4_0[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f812f, data->f2x9cx0d0f812f);
}
@@ -1056,9 +1056,9 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x30 + i, data->f2x9cx30[i]);
- for (i=0; i<12; i++)
+ for (i = 0; i < 12; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x40 + i, data->f2x9cx40[i]);
}
}