author    | Elyes HAOUAS <ehaouas@noos.fr>      | 2016-08-23 21:36:02 +0200
committer | Martin Roth <martinroth@google.com> | 2016-08-31 20:28:51 +0200
commit    | 5a7e72f1aef02b326a67d883d92fe8c0aad9f3a9 (patch)
tree      | 8d51ad99d2d9469f195694b29a571facf18d89f8 /src/northbridge/amd
parent    | 2b010b8795de84b6753c5e49d6a73c25fee96da1 (diff)
northbridge/amd: Add required space before opening parenthesis '('
Change-Id: Ic85f725bbdf72fbac5a4d9482c61343c5eb35e25
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
Reviewed-on: https://review.coreboot.org/16305
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
Diffstat (limited to 'src/northbridge/amd')
74 files changed, 881 insertions, 881 deletions
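The whole patch is mechanical: it inserts a single space between a control-flow keyword (`if`, `for`, `while`, `do ... while`) and its opening parenthesis, matching the checkpatch-style rule the commit title quotes. Below is a minimal before/after sketch of that rule; it is a hypothetical, self-contained snippet for illustration, not a hunk taken from this patch.

```c
#include <stdio.h>

/*
 * Illustrative only -- a condensed before/after of the whitespace fix this
 * commit applies across 74 files. Function and variable names here are
 * generic placeholders, not copied from any particular hunk.
 */

/* Before: no space between the keyword and '(' (the style violation). */
static void print_even_before(int n)
{
	int i;
	for(i = 0; i < n; i++) {
		if((i % 2) == 0)
			printf("%d\n", i);
	}
}

/* After: a single space separates 'for'/'if' from the opening parenthesis. */
static void print_even_after(int n)
{
	int i;
	for (i = 0; i < n; i++) {
		if ((i % 2) == 0)
			printf("%d\n", i);
	}
}

int main(void)
{
	print_even_before(4);	/* prints 0 and 2 */
	print_even_after(4);	/* identical output -- only the formatting differs */
	return 0;
}
```

As the balanced diffstat (881 insertions, 881 deletions) suggests, no functional change is intended; every hunk only adjusts whitespace.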
diff --git a/src/northbridge/amd/agesa/common/common.c b/src/northbridge/amd/agesa/common/common.c index b86d274f58..ab8f687597 100644 --- a/src/northbridge/amd/agesa/common/common.c +++ b/src/northbridge/amd/agesa/common/common.c @@ -68,7 +68,7 @@ AGESA_STATUS common_ReadCbfsSpd (UINT32 Func, UINT32 Data, VOID *ConfigPtr) u16 i; printk(BIOS_SPEW, "\nDisplay the SPD"); for (i = 0; i < spd_file_length; i++) { - if((i % 16) == 0x00) + if ((i % 16) == 0x00) printk(BIOS_SPEW, "\n%02x: ",i); printk(BIOS_SPEW, "%02x ", info->Buffer[i]); } diff --git a/src/northbridge/amd/agesa/def_callouts.c b/src/northbridge/amd/agesa/def_callouts.c index cc733c4318..a2e12a9022 100644 --- a/src/northbridge/amd/agesa/def_callouts.c +++ b/src/northbridge/amd/agesa/def_callouts.c @@ -38,7 +38,7 @@ AGESA_STATUS GetBiosCallout (UINT32 Func, UINTN Data, VOID *ConfigPtr) if (BiosCallouts[i].CalloutName == Func) break; } - if(i >= BiosCalloutsLen) + if (i >= BiosCalloutsLen) return AGESA_UNSUPPORTED; return BiosCallouts[i].CalloutPtr (Func, Data, ConfigPtr); diff --git a/src/northbridge/amd/agesa/family10/northbridge.c b/src/northbridge/amd/agesa/family10/northbridge.c index 4c98ac3344..1c8f904621 100644 --- a/src/northbridge/amd/agesa/family10/northbridge.c +++ b/src/northbridge/amd/agesa/family10/northbridge.c @@ -251,7 +251,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -339,7 +339,7 @@ static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsi } //Ext conf space - if(!reg) { + if (!reg) { //because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range u32 index = get_io_addr_index(nodeid, link); reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255 @@ -507,7 +507,7 @@ static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -814,7 +814,7 @@ static void amdfam10_domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1043,7 +1043,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? 
cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/agesa/family12/amdfam12_conf.c b/src/northbridge/amd/agesa/family12/amdfam12_conf.c index 04ad5261f2..7afa39df03 100644 --- a/src/northbridge/amd/agesa/family12/amdfam12_conf.c +++ b/src/northbridge/amd/agesa/family12/amdfam12_conf.c @@ -54,12 +54,12 @@ static void set_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index, /* io range allocation */ tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit - for(i=0; i<nodes; i++) { + for (i=0; i<nodes; i++) { dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC4 + ht_c_index * 8, tempreg); } tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ? - for(i=0; i<nodes; i++){ + for (i=0; i<nodes; i++){ dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC0 + ht_c_index * 8, tempreg); } @@ -73,7 +73,7 @@ static void clear_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index, device_t dev; /* io range allocation */ - for(i=0; i<nodes; i++) { + for (i=0; i<nodes; i++) { dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC4 + ht_c_index * 8, 0); pci_write_config32(dev, 0xC0 + ht_c_index * 8, 0); @@ -87,8 +87,8 @@ static u32 get_io_addr_index(u32 nodeid, u32 linkn) #if 0 u32 index; - for(index=0; index<256; index++) { - if(sysconf.conf_io_addrx[index+4] == 0) { + for (index=0; index<256; index++) { + if (sysconf.conf_io_addrx[index+4] == 0) { sysconf.conf_io_addr[index+4] = (nodeid & 0x3f) ; sysconf.conf_io_addrx[index+4] = 1 | ((linkn & 0x7)<<4); return index; @@ -103,8 +103,8 @@ static u32 get_mmio_addr_index(u32 nodeid, u32 linkn) #if 0 u32 index; - for(index=0; index<64; index++) { - if(sysconf.conf_mmio_addrx[index+8] == 0) { + for (index=0; index<64; index++) { + if (sysconf.conf_mmio_addrx[index+8] == 0) { sysconf.conf_mmio_addr[index+8] = (nodeid & 0x3f) ; sysconf.conf_mmio_addrx[index+8] = 1 | ((linkn & 0x7)<<4); return index; diff --git a/src/northbridge/amd/agesa/family12/northbridge.c b/src/northbridge/amd/agesa/family12/northbridge.c index a36f47e65a..df6b3e4f93 100644 --- a/src/northbridge/amd/agesa/family12/northbridge.c +++ b/src/northbridge/amd/agesa/family12/northbridge.c @@ -55,7 +55,7 @@ static device_t get_node_pci(u32 nodeid, u32 fn) static void get_fx_devs(void) { int i; - for(i = 0; i < FX_DEVS; i++) { + for (i = 0; i < FX_DEVS; i++) { __f0_dev[i] = get_node_pci(i, 0); __f1_dev[i] = get_node_pci(i, 1); __f2_dev[i] = get_node_pci(i, 2); @@ -81,7 +81,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev = __f1_dev[i]; if (dev && dev->enabled) { pci_write_config32(dev, reg, value); @@ -125,11 +125,11 @@ static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid, int result; printk(BIOS_DEBUG, "\nFam12h - northbridge.c - %s - Start.\n",__func__); res = 0; - for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { + for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { device_t dev = __f0_dev[nodeid]; if (!dev) continue; - for(link = 0; !res && (link < 8); link++) { + for (link = 0; !res && (link < 8); link++) { res = probe_resource(dev, IOINDEX(0x1000 + reg, link)); } } @@ -159,7 +159,7 @@ static struct resource *amdfam12_find_iopair(device_t dev, unsigned nodeid, unsi } //Ext conf space - if(!reg) { + if (!reg) { //because of Extend conf space, we will never run out of reg, but we need one index to differ them. 
so same node and same link can have multi range u32 index = get_io_addr_index(nodeid, link); reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255 @@ -176,7 +176,7 @@ static struct resource *amdfam12_find_mempair(device_t dev, u32 nodeid, u32 link u32 free_reg, reg; resource = 0; free_reg = 0; - for(reg = 0x80; reg <= 0xb8; reg += 0x8) { + for (reg = 0x80; reg <= 0xb8; reg += 0x8) { int result; result = reg_useable(reg, dev, nodeid, link); if (result == 1) { @@ -193,7 +193,7 @@ static struct resource *amdfam12_find_mempair(device_t dev, u32 nodeid, u32 link } //Ext conf space - if(!reg) { + if (!reg) { //because of Extend conf space, we will never run out of reg, // but we need one index to differ them. so same node and // same link can have multi range @@ -277,9 +277,9 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) struct dram_base_mask_t d; u32 hole; d = get_dram_base_mask(0); - if(d.mask & 1) { + if (d.mask & 1) { hole = pci_read_config32(__f1_dev[0], 0xf0); - if(hole & 1) { // we find the hole + if (hole & 1) { // we find the hole mem_hole.hole_startk = (hole & (0xff<<24)) >> 10; mem_hole.node_id = 0; // record the node No with hole } @@ -289,15 +289,15 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) /* We need to double check if there is special set on base reg and limit reg * are not continuous instead of hole, it will find out its hole_startk. */ - if(mem_hole.node_id==-1) { + if (mem_hole.node_id==-1) { resource_t limitk_pri = 0; struct dram_base_mask_t d; resource_t base_k, limit_k; d = get_dram_base_mask(0); - if(d.base & 1) { + if (d.base & 1) { base_k = ((resource_t)(d.base & 0x1fffff00)) <<9; - if(base_k <= 4 *1024 * 1024) { - if(limitk_pri != base_k) { // we find the hole + if (base_k <= 4 *1024 * 1024) { + if (limitk_pri != base_k) { // we find the hole mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G mem_hole.node_id = 0; } @@ -321,7 +321,7 @@ static void read_resources(device_t dev) printk(BIOS_DEBUG, "\nFam12h - northbridge.c - %s - Start.\n",__func__); nodeid = amdfam12_nodeid(dev); - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { amdfam12_link_read_bases(dev, nodeid, link->link_num); } @@ -397,7 +397,7 @@ printk(BIOS_DEBUG, "\nFam12h - northbridge.c - %s - Start.\n",__func__); printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate )) #endif break; // XXX this break looks questionable @@ -428,11 +428,11 @@ static void set_resources(device_t dev) create_vga_resource(dev, nodeid); /* Set each resource we have found */ - for(res = dev->resource_list; res; res = res->next) { + for (res = dev->resource_list; res; res = res->next) { set_resource(dev, res, nodeid); } - for(bus = dev->link_list; bus; bus = bus->next) { + for (bus = dev->link_list; bus; bus = bus->next) { if (bus->children) assign_resources(bus); } @@ -477,7 +477,7 @@ static void domain_read_resources(device_t dev) /* Find the already assigned resource pairs */ get_fx_devs(); - for(reg = 0x80; reg <= 0xc0; reg+= 0x08) { + for (reg = 0x80; reg <= 0xc0; reg+= 0x08) { u32 base, limit; base = f1_read_config32(reg); limit = f1_read_config32(reg + 0x04); @@ -485,7 +485,7 @@ static void 
domain_read_resources(device_t dev) if ((base & 3) != 0) { unsigned nodeid, reg_link; device_t reg_dev; - if(reg<0xc0) { // mmio + if (reg<0xc0) { // mmio nodeid = (limit & 0xf) + (base&0x30); } else { // io nodeid = (limit & 0xf) + ((base>>4)&0x30); @@ -540,7 +540,7 @@ static void domain_set_resources(device_t dev) #endif pci_tolm = 0xffffffffUL; - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { pci_tolm = my_find_pci_tolm(link, pci_tolm); } @@ -608,7 +608,7 @@ printk(BIOS_DEBUG, "adsr - 0xa0000 to 0xbffff resource.\n"); if (basek <= mmio_basek) { unsigned pre_sizek; pre_sizek = mmio_basek - basek; - if(pre_sizek>0) { + if (pre_sizek>0) { ram_resource(dev, idx, basek, pre_sizek); idx += 0x10; sizek -= pre_sizek; @@ -642,7 +642,7 @@ printk(BIOS_DEBUG, "adsr - 0xa0000 to 0xbffff resource.\n"); set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } diff --git a/src/northbridge/amd/agesa/family14/amdfam14_conf.c b/src/northbridge/amd/agesa/family14/amdfam14_conf.c index 1a387f60af..6db2b95771 100644 --- a/src/northbridge/amd/agesa/family14/amdfam14_conf.c +++ b/src/northbridge/amd/agesa/family14/amdfam14_conf.c @@ -54,12 +54,12 @@ static void set_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index, /* io range allocation */ tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit - for(i=0; i<nodes; i++) { + for (i=0; i<nodes; i++) { dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC4 + ht_c_index * 8, tempreg); } tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ? - for(i=0; i<nodes; i++){ + for (i=0; i<nodes; i++){ dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC0 + ht_c_index * 8, tempreg); } @@ -73,7 +73,7 @@ static void clear_ht_c_io_addr_reg(u32 nodeid, u32 linkn, u32 ht_c_index, device_t dev; /* io range allocation */ - for(i=0; i<nodes; i++) { + for (i=0; i<nodes; i++) { dev = NODE_PCI(i, 1); pci_write_config32(dev, 0xC4 + ht_c_index * 8, 0); pci_write_config32(dev, 0xC0 + ht_c_index * 8, 0); @@ -87,8 +87,8 @@ static u32 get_io_addr_index(u32 nodeid, u32 linkn) #if 0 u32 index; - for(index=0; index<256; index++) { - if(sysconf.conf_io_addrx[index+4] == 0) { + for (index=0; index<256; index++) { + if (sysconf.conf_io_addrx[index+4] == 0) { sysconf.conf_io_addr[index+4] = (nodeid & 0x3f) ; sysconf.conf_io_addrx[index+4] = 1 | ((linkn & 0x7)<<4); return index; @@ -103,8 +103,8 @@ static u32 get_mmio_addr_index(u32 nodeid, u32 linkn) #if 0 u32 index; - for(index=0; index<64; index++) { - if(sysconf.conf_mmio_addrx[index+8] == 0) { + for (index=0; index<64; index++) { + if (sysconf.conf_mmio_addrx[index+8] == 0) { sysconf.conf_mmio_addr[index+8] = (nodeid & 0x3f) ; sysconf.conf_mmio_addrx[index+8] = 1 | ((linkn & 0x7)<<4); return index; diff --git a/src/northbridge/amd/agesa/family15/northbridge.c b/src/northbridge/amd/agesa/family15/northbridge.c index 0079a7803b..f347b63409 100644 --- a/src/northbridge/amd/agesa/family15/northbridge.c +++ b/src/northbridge/amd/agesa/family15/northbridge.c @@ -153,7 +153,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -410,7 +410,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, 
"VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -861,7 +861,7 @@ static void domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1033,7 +1033,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/agesa/family15rl/northbridge.c b/src/northbridge/amd/agesa/family15rl/northbridge.c index 511b34e49b..ccb256b6b7 100644 --- a/src/northbridge/amd/agesa/family15rl/northbridge.c +++ b/src/northbridge/amd/agesa/family15rl/northbridge.c @@ -153,7 +153,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -410,7 +410,7 @@ static void create_vga_resource(struct device *dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -856,7 +856,7 @@ static void domain_set_resources(struct device *dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1021,7 +1021,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? 
cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/agesa/family15tn/northbridge.c b/src/northbridge/amd/agesa/family15tn/northbridge.c index 576334d28e..63ca2af5b1 100644 --- a/src/northbridge/amd/agesa/family15tn/northbridge.c +++ b/src/northbridge/amd/agesa/family15tn/northbridge.c @@ -152,7 +152,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -409,7 +409,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -855,7 +855,7 @@ static void domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1020,7 +1020,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/agesa/family16kb/northbridge.c b/src/northbridge/amd/agesa/family16kb/northbridge.c index 25bb337b5d..c0769e725d 100644 --- a/src/northbridge/amd/agesa/family16kb/northbridge.c +++ b/src/northbridge/amd/agesa/family16kb/northbridge.c @@ -152,7 +152,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -409,7 +409,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -872,7 +872,7 @@ static void domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1037,7 +1037,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? 
cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/amdfam10/acpi.c b/src/northbridge/amd/amdfam10/acpi.c index d4ad9a409d..51da7d6c88 100644 --- a/src/northbridge/amd/amdfam10/acpi.c +++ b/src/northbridge/amd/amdfam10/acpi.c @@ -31,7 +31,7 @@ unsigned long acpi_create_madt_lapic_nmis(unsigned long current, u16 flags, u8 l device_t cpu; int cpu_index = 0; - for(cpu = all_devices; cpu; cpu = cpu->next) { + for (cpu = all_devices; cpu; cpu = cpu->next) { if ((cpu->path.type != DEVICE_PATH_APIC) || (cpu->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) { continue; @@ -50,7 +50,7 @@ unsigned long acpi_create_srat_lapics(unsigned long current) device_t cpu; int cpu_index = 0; - for(cpu = all_devices; cpu; cpu = cpu->next) { + for (cpu = all_devices; cpu; cpu = cpu->next) { if ((cpu->path.type != DEVICE_PATH_APIC) || (cpu->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) { continue; @@ -94,9 +94,9 @@ static void set_srat_mem(void *gp, struct device *dev, struct resource *res) * next range is from 1M--- * So will cut off before 1M in the mem range */ - if((basek+sizek)<1024) return; + if ((basek+sizek)<1024) return; - if(basek<1024) { + if (basek<1024) { sizek -= 1024 - basek; basek = 1024; } @@ -158,9 +158,9 @@ static unsigned long acpi_fill_slit(unsigned long current) *p = (u8) nodes; p += 8; - for(i=0;i<nodes;i++) { - for(j=0;j<nodes; j++) { - if(i==j) + for (i=0;i<nodes;i++) { + for (j=0;j<nodes; j++) { + if (i==j) p[i*nodes+j] = 10; else p[i*nodes+j] = 16; @@ -221,7 +221,7 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_name("BUSN"); acpigen_write_package(HC_NUMS); - for(i=0; i<HC_NUMS; i++) { + for (i=0; i<HC_NUMS; i++) { acpigen_write_dword(sysconf.ht_c_conf_bus[i]); } // minus the opcode @@ -231,7 +231,7 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_package(HC_NUMS * 4); - for(i=0;i<(HC_NUMS*2);i++) { // FIXME: change to more chain + for (i=0;i<(HC_NUMS*2);i++) { // FIXME: change to more chain acpigen_write_dword(sysconf.conf_mmio_addrx[i]); //base acpigen_write_dword(sysconf.conf_mmio_addr[i]); //mask } @@ -242,7 +242,7 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_package(HC_NUMS * 2); - for(i=0;i<HC_NUMS;i++) { // FIXME: change to more chain + for (i=0;i<HC_NUMS;i++) { // FIXME: change to more chain acpigen_write_dword(sysconf.conf_io_addrx[i]); acpigen_write_dword(sysconf.conf_io_addr[i]); } @@ -273,10 +273,10 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_package(HC_POSSIBLE_NUM); - for(i=0;i<sysconf.hc_possible_num;i++) { + for (i=0;i<sysconf.hc_possible_num;i++) { acpigen_write_dword(sysconf.pci1234[i]); } - for(i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 + for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 acpigen_write_dword(0x00000000); } // minus the opcode @@ -286,10 +286,10 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_package(HC_POSSIBLE_NUM); - for(i=0;i<sysconf.hc_possible_num;i++) { + for (i=0;i<sysconf.hc_possible_num;i++) { acpigen_write_dword(sysconf.hcdn[i]); } - for(i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 + for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 acpigen_write_dword(0x20202020); } // minus the opcode @@ -299,10 +299,10 @@ void northbridge_acpi_write_vars(device_t device) u8 CBST, CBB2, CBS2; - 
if(CONFIG_CBB == 0xff) { + if (CONFIG_CBB == 0xff) { CBST = (u8) (0x0f); } else { - if((sysconf.pci1234[0] >> 12) & 0xff) { //sb chain on other than bus 0 + if ((sysconf.pci1234[0] >> 12) & 0xff) { //sb chain on other than bus 0 CBST = (u8) (0x0f); } else { CBST = (u8) (0x00); @@ -311,7 +311,7 @@ void northbridge_acpi_write_vars(device_t device) acpigen_write_name_byte("CBST", CBST); - if((CONFIG_CBB == 0xff) && (sysconf.nodes>32)) { + if ((CONFIG_CBB == 0xff) && (sysconf.nodes>32)) { CBS2 = 0x0f; CBB2 = (u8)(CONFIG_CBB-1); } else { diff --git a/src/northbridge/amd/amdfam10/amdfam10_util.asl b/src/northbridge/amd/amdfam10/amdfam10_util.asl index e10efb4623..bf0d177e14 100644 --- a/src/northbridge/amd/amdfam10/amdfam10_util.asl +++ b/src/northbridge/amd/amdfam10/amdfam10_util.asl @@ -66,7 +66,7 @@ Scope (\_SB) Method (GHCE, 1, NotSerialized) // check if the HC enabled { Store (DerefOf (Index (\_SB.PCI0.HCLK, Arg0)), Local1) - if(LEqual ( And(Local1, 0x01), 0x01)) { Return (0x0F) } + if (LEqual ( And(Local1, 0x01), 0x01)) { Return (0x0F) } Else { Return (0x00) } } diff --git a/src/northbridge/amd/amdfam10/debug.c b/src/northbridge/amd/amdfam10/debug.c index 27bd3319fe..f8e535851a 100644 --- a/src/northbridge/amd/amdfam10/debug.c +++ b/src/northbridge/amd/amdfam10/debug.c @@ -35,7 +35,7 @@ static void print_debug_pci_dev(u32 dev) static inline void print_pci_devices(void) { device_t dev; - for(dev = PCI_DEV(0, 0, 0); + for (dev = PCI_DEV(0, 0, 0); dev <= PCI_DEV(0xff, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { u32 id; @@ -47,10 +47,10 @@ static inline void print_pci_devices(void) } print_debug_pci_dev(dev); printk(BIOS_DEBUG, " %04x:%04x\n", (id & 0xffff), (id>>16)); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { u8 hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -60,7 +60,7 @@ static inline void print_pci_devices(void) static inline void print_pci_devices_on_bus(u32 busn) { device_t dev; - for(dev = PCI_DEV(busn, 0, 0); + for (dev = PCI_DEV(busn, 0, 0); dev <= PCI_DEV(busn, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { u32 id; @@ -72,10 +72,10 @@ static inline void print_pci_devices_on_bus(u32 busn) } print_debug_pci_dev(dev); printk(BIOS_DEBUG, " %04x:%04x\n", (id & 0xffff), (id>>16)); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { u8 hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -89,13 +89,13 @@ static void dump_pci_device_range(u32 dev, u32 start_reg, u32 size) int j; int end = start_reg + size; - for(i = start_reg; i < end; i+=4) { + for (i = start_reg; i < end; i+=4) { u32 val; if ((i & 0x0f) == 0) { printk(BIOS_DEBUG, "\n%04x:",i); } val = pci_read_config32(dev, i); - for(j=0;j<4;j++) { + for (j=0;j<4;j++) { printk(BIOS_DEBUG, " %02x", val & 0xff); val >>= 8; } @@ -116,12 +116,12 @@ static void dump_pci_device_index_wait_range(u32 dev, u32 index_reg, u32 start, print_debug_pci_dev(dev); printk(BIOS_DEBUG, " -- index_reg=%08x", index_reg); - for(i = start; i < end; i++) { + for (i = start; i < end; i++) { u32 val; int j; printk(BIOS_DEBUG, "\n%02x:",i); val = pci_read_config32_index_wait(dev, index_reg, i); - for(j=0;j<4;j++) { + for (j=0;j<4;j++) { printk(BIOS_DEBUG, " %02x", val & 0xff); val >>= 8; } @@ -147,7 +147,7 @@ static inline void dump_pci_device_index(u32 dev, u32 index_reg, u32 type, u32 l type<<=28; - for(i = 0; i < length; i++) { 
+ for (i = 0; i < length; i++) { u32 val; if ((i & 0x0f) == 0) { printk(BIOS_DEBUG, "\n%02x:",i); @@ -161,7 +161,7 @@ static inline void dump_pci_device_index(u32 dev, u32 index_reg, u32 type, u32 l static inline void dump_pci_devices(void) { device_t dev; - for(dev = PCI_DEV(0, 0, 0); + for (dev = PCI_DEV(0, 0, 0); dev <= PCI_DEV(0xff, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { u32 id; @@ -173,10 +173,10 @@ static inline void dump_pci_devices(void) } dump_pci_device(dev); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { u8 hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -186,7 +186,7 @@ static inline void dump_pci_devices(void) static inline void dump_pci_devices_on_bus(u32 busn) { device_t dev; - for(dev = PCI_DEV(busn, 0, 0); + for (dev = PCI_DEV(busn, 0, 0); dev <= PCI_DEV(busn, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { u32 id; @@ -198,10 +198,10 @@ static inline void dump_pci_devices_on_bus(u32 busn) } dump_pci_device(dev); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { u8 hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -214,13 +214,13 @@ static void dump_spd_registers(const struct mem_controller *ctrl) { int i; printk(BIOS_DEBUG, "\n"); - for(i = 0; i < DIMM_SOCKETS; i++) { + for (i = 0; i < DIMM_SOCKETS; i++) { u32 device; device = ctrl->spd_addr[i]; if (device) { int j; printk(BIOS_DEBUG, "dimm: %02x.0: %02x", i, device); - for(j = 0; j < 128; j++) { + for (j = 0; j < 128; j++) { int status; u8 byte; if ((j & 0xf) == 0) { @@ -239,7 +239,7 @@ static void dump_spd_registers(const struct mem_controller *ctrl) if (device) { int j; printk(BIOS_DEBUG, "dimm: %02x.1: %02x", i, device); - for(j = 0; j < 128; j++) { + for (j = 0; j < 128; j++) { int status; u8 byte; if ((j & 0xf) == 0) { @@ -260,11 +260,11 @@ static void dump_smbus_registers(void) { u32 device; printk(BIOS_DEBUG, "\n"); - for(device = 1; device < 0x80; device++) { + for (device = 1; device < 0x80; device++) { int j; - if( smbus_read_byte(device, 0) < 0 ) continue; + if ( smbus_read_byte(device, 0) < 0 ) continue; printk(BIOS_DEBUG, "smbus: %02x", device); - for(j = 0; j < 256; j++) { + for (j = 0; j < 256; j++) { int status; u8 byte; status = smbus_read_byte(device, j); @@ -287,7 +287,7 @@ static inline void dump_io_resources(u32 port) int i; udelay(2000); printk(BIOS_DEBUG, "%04x:\n", port); - for(i=0;i<256;i++) { + for (i=0;i<256;i++) { u8 val; if ((i & 0x0f) == 0) { printk(BIOS_DEBUG, "%02x:", i); @@ -305,8 +305,8 @@ static inline void dump_mem(u32 start, u32 end) { u32 i; printk(BIOS_DEBUG, "dump_mem:"); - for(i=start;i<end;i++) { - if((i & 0xf)==0) { + for (i=start;i<end;i++) { + if ((i & 0xf)==0) { printk(BIOS_DEBUG, "\n%08x:", i); } printk(BIOS_DEBUG, " %02x", (u8)*((u8 *)i)); diff --git a/src/northbridge/amd/amdfam10/early_ht.c b/src/northbridge/amd/amdfam10/early_ht.c index 3e59a324e7..57c992c565 100644 --- a/src/northbridge/amd/amdfam10/early_ht.c +++ b/src/northbridge/amd/amdfam10/early_ht.c @@ -79,7 +79,7 @@ static void enumerate_ht_chain(void) { pos = pci_io_read_config8(PCI_DEV(0,0,0), PCI_CAPABILITY_LIST); } - while(pos != 0) { + while (pos != 0) { u8 cap; cap = pci_io_read_config8(PCI_DEV(0,0,0), pos + PCI_CAP_LIST_ID); if (cap == PCI_CAP_ID_HT) { @@ -96,8 +96,8 @@ static void enumerate_ht_chain(void) device_t devx; #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - 
if(next_unitid>=0x18) { - if(!end_used) { + if (next_unitid>=0x18) { + if (!end_used) { next_unitid = CONFIG_HT_CHAIN_END_UNITID_BASE; end_used = 1; } else { @@ -147,18 +147,18 @@ static void enumerate_ht_chain(void) break; } } - } while((ctrl & (1 << 5)) == 0); + } while ((ctrl & (1 << 5)) == 0); break; } } pos = pci_io_read_config8(PCI_DEV(0, 0, 0), pos + PCI_CAP_LIST_NEXT); } - } while(last_unitid != next_unitid); + } while (last_unitid != next_unitid); out: ; #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - if((ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) { + if ((ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) { u16 flags; flags = pci_io_read_config16(PCI_DEV(0,real_last_unitid,0), real_last_pos + PCI_CAP_FLAGS); flags &= ~0x1f; diff --git a/src/northbridge/amd/amdfam10/get_pci1234.c b/src/northbridge/amd/amdfam10/get_pci1234.c index 41c2b33152..a6f679ecad 100644 --- a/src/northbridge/amd/amdfam10/get_pci1234.c +++ b/src/northbridge/amd/amdfam10/get_pci1234.c @@ -73,29 +73,29 @@ void get_pci1234(void) //2. so at the same time we need update hsdn with hcdn_reg here // printk(BIOS_DEBUG, "sysconf.ht_c_num = %02d\n", sysconf.ht_c_num); - for(j=0;j<sysconf.ht_c_num;j++) { + for (j=0;j<sysconf.ht_c_num;j++) { u32 dwordx; dwordx = sysconf.ht_c_conf_bus[j]; // printk(BIOS_DEBUG, "sysconf.ht_c_conf_bus[%02d] = %08x\n", j, sysconf.ht_c_conf_bus[j]); dwordx &=0xfffffffd; //keep bus num, node_id, link_num, enable bits - if((dwordx & 0x7fd) == dword) { //SBLINK + if ((dwordx & 0x7fd) == dword) { //SBLINK sysconf.pci1234[0] = dwordx; sysconf.hcdn[0] = sysconf.hcdn_reg[j]; continue; } - if((dwordx & 1)) { + if ((dwordx & 1)) { // We need to find out the number of HC // for exact match - for(i=1;i<sysconf.hc_possible_num;i++) { - if((dwordx & 0x7fc) == (sysconf.pci1234[i] & 0x7fc)) { // same node and same linkn + for (i=1;i<sysconf.hc_possible_num;i++) { + if ((dwordx & 0x7fc) == (sysconf.pci1234[i] & 0x7fc)) { // same node and same linkn sysconf.pci1234[i] = dwordx; sysconf.hcdn[i] = sysconf.hcdn_reg[j]; break; } } // for 0xffc match or same node - for(i=1;i<sysconf.hc_possible_num;i++) { - if((dwordx & 0x7fc) == (dwordx & sysconf.pci1234[i] & 0x7fc)) { + for (i=1;i<sysconf.hc_possible_num;i++) { + if ((dwordx & 0x7fc) == (dwordx & sysconf.pci1234[i] & 0x7fc)) { sysconf.pci1234[i] = dwordx; sysconf.hcdn[i] = sysconf.hcdn_reg[j]; break; @@ -104,8 +104,8 @@ void get_pci1234(void) } } - for(i=1;i<sysconf.hc_possible_num;i++) { - if(!(sysconf.pci1234[i] & 1)) { + for (i=1;i<sysconf.hc_possible_num;i++) { + if (!(sysconf.pci1234[i] & 1)) { sysconf.pci1234[i] = 0; sysconf.hcdn[i] = 0x20202020; } diff --git a/src/northbridge/amd/amdfam10/northbridge.c b/src/northbridge/amd/amdfam10/northbridge.c index 19acab61db..42647b1e3a 100644 --- a/src/northbridge/amd/amdfam10/northbridge.c +++ b/src/northbridge/amd/amdfam10/northbridge.c @@ -70,7 +70,7 @@ static unsigned fx_devs=0; device_t get_node_pci(u32 nodeid, u32 fn) { #if NODE_NUMS + CONFIG_CDB >= 32 - if((CONFIG_CDB + nodeid) < 32) { + if ((CONFIG_CDB + nodeid) < 32) { return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn)); } else { return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn)); @@ -99,7 +99,7 @@ static inline uint8_t is_fam15h(void) static void get_fx_devs(void) { int i; - for(i = 0; i < FX_DEVS; i++) { + for (i = 0; i < FX_DEVS; i++) { __f0_dev[i] = get_node_pci(i, 0); __f1_dev[i] = get_node_pci(i, 1); __f2_dev[i] = get_node_pci(i, 2); @@ 
-124,7 +124,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -138,7 +138,7 @@ u32 amdfam10_nodeid(device_t dev) #if NODE_NUMS == 64 unsigned busn; busn = dev->bus->secondary; - if(busn != CONFIG_CBB) { + if (busn != CONFIG_CBB) { return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32; } else { return (dev->path.pci.devfn >> 3) - CONFIG_CDB; @@ -395,12 +395,12 @@ static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid, unsigned nodeid, link = 0; int result; res = 0; - for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { + for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { device_t dev; dev = __f0_dev[nodeid]; if (!dev) continue; - for(link = 0; !res && (link < 8); link++) { + for (link = 0; !res && (link < 8); link++) { res = probe_resource(dev, IOINDEX(0x1000 + reg, link)); } } @@ -422,7 +422,7 @@ static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsi u32 free_reg, reg; resource = 0; free_reg = 0; - for(reg = 0xc0; reg <= 0xd8; reg += 0x8) { + for (reg = 0xc0; reg <= 0xd8; reg += 0x8) { int result; result = reg_useable(reg, dev, nodeid, link); if (result == 1) { @@ -438,7 +438,7 @@ static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsi } //Ext conf space - if(!reg) { + if (!reg) { //because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range u32 index = get_io_addr_index(nodeid, link); reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255 @@ -455,7 +455,7 @@ static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link u32 free_reg, reg; resource = 0; free_reg = 0; - for(reg = 0x80; reg <= 0xb8; reg += 0x8) { + for (reg = 0x80; reg <= 0xb8; reg += 0x8) { int result; result = reg_useable(reg, dev, nodeid, link); if (result == 1) { @@ -471,7 +471,7 @@ static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link } //Ext conf space - if(!reg) { + if (!reg) { //because of Extend conf space, we will never run out of reg, // but we need one index to differ them. 
so same node and // same link can have multi range @@ -530,7 +530,7 @@ static void amdfam10_read_resources(device_t dev) u32 nodeid; struct bus *link; nodeid = amdfam10_nodeid(dev); - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { amdfam10_link_read_bases(dev, nodeid, link->link_num); } @@ -605,7 +605,7 @@ static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -645,11 +645,11 @@ static void amdfam10_set_resources(device_t dev) amdfam10_create_vga_resource(dev, nodeid); /* Set each resource we have found */ - for(res = dev->resource_list; res; res = res->next) { + for (res = dev->resource_list; res; res = res->next) { amdfam10_set_resource(dev, res, nodeid); } - for(bus = dev->link_list; bus; bus = bus->next) { + for (bus = dev->link_list; bus; bus = bus->next) { if (bus->children) { assign_resources(bus); } @@ -711,7 +711,7 @@ static void amdfam10_domain_read_resources(device_t dev) /* Find the already assigned resource pairs */ get_fx_devs(); - for(reg = 0x80; reg <= 0xd8; reg+= 0x08) { + for (reg = 0x80; reg <= 0xd8; reg+= 0x08) { u32 base, limit; base = f1_read_config32(reg); limit = f1_read_config32(reg + 0x04); @@ -719,7 +719,7 @@ static void amdfam10_domain_read_resources(device_t dev) if ((base & 3) != 0) { unsigned nodeid, reg_link; device_t reg_dev; - if(reg<0xc0) { // mmio + if (reg<0xc0) { // mmio nodeid = (limit & 0xf) + (base&0x30); } else { // io nodeid = (limit & 0xf) + ((base>>4)&0x30); @@ -864,10 +864,10 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) struct dram_base_mask_t d; u32 hole; d = get_dram_base_mask(i); - if(!(d.mask & 1)) continue; // no memory on this node + if (!(d.mask & 1)) continue; // no memory on this node hole = pci_read_config32(__f1_dev[i], 0xf0); - if(hole & 1) { // we find the hole + if (hole & 1) { // we find the hole mem_hole.hole_startk = (hole & (0xff<<24)) >> 10; mem_hole.node_id = i; // record the node No with hole break; // only one hole @@ -877,17 +877,17 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) /* We need to double check if there is special set on base reg and limit reg * are not continuous instead of hole, it will find out its hole_startk. 
*/ - if(mem_hole.node_id==-1) { + if (mem_hole.node_id==-1) { resource_t limitk_pri = 0; - for(i=0; i<sysconf.nodes; i++) { + for (i=0; i<sysconf.nodes; i++) { struct dram_base_mask_t d; resource_t base_k, limit_k; d = get_dram_base_mask(i); - if(!(d.base & 1)) continue; + if (!(d.base & 1)) continue; base_k = ((resource_t)(d.base & 0x1fffff00)) <<9; - if(base_k > 4 *1024 * 1024) break; // don't need to go to check - if(limitk_pri != base_k) { // we find the hole + if (base_k > 4 *1024 * 1024) break; // don't need to go to check + if (limitk_pri != base_k) { // we find the hole mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G mem_hole.node_id = i; break; //only one hole @@ -927,7 +927,7 @@ static void amdfam10_domain_set_resources(device_t dev) #endif pci_tolm = 0xffffffffUL; - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { pci_tolm = my_find_pci_tolm(link, pci_tolm); } @@ -960,12 +960,12 @@ static void amdfam10_domain_set_resources(device_t dev) #endif idx = 0x10; - for(i = 0; i < sysconf.nodes; i++) { + for (i = 0; i < sysconf.nodes; i++) { struct dram_base_mask_t d; resource_t basek, limitk, sizek; // 4 1T d = get_dram_base_mask(i); - if(!(d.mask & 1)) continue; + if (!(d.mask & 1)) continue; basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ; sizek = limitk - basek; @@ -986,7 +986,7 @@ static void amdfam10_domain_set_resources(device_t dev) if (basek <= mmio_basek) { unsigned pre_sizek; pre_sizek = mmio_basek - basek; - if(pre_sizek>0) { + if (pre_sizek>0) { ram_resource(dev, (idx | i), basek, pre_sizek); idx += 0x10; sizek -= pre_sizek; @@ -1011,7 +1011,7 @@ static void amdfam10_domain_set_resources(device_t dev) uma_resource(dev, 7, uma_memory_base >> 10, uma_memory_size >> 10); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1024,11 +1024,11 @@ static void amdfam10_domain_scan_bus(device_t dev) int i; struct bus *link; /* Unmap all of the HT chains */ - for(reg = 0xe0; reg <= 0xec; reg += 4) { + for (reg = 0xe0; reg <= 0xec; reg += 4) { f1_write_config32(reg, 0); } - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { link->secondary = dev->bus->subordinate; pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff); dev->bus->subordinate = link->subordinate; @@ -1038,7 +1038,7 @@ static void amdfam10_domain_scan_bus(device_t dev) * Including enabling relaxed ordering if it is safe. 
*/ get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t f0_dev; f0_dev = __f0_dev[i]; if (f0_dev && f0_dev->enabled) { @@ -1348,7 +1348,7 @@ static void sysconf_init(device_t dev) // first node unsigned ht_c_index; - for(ht_c_index=0; ht_c_index<32; ht_c_index++) { + for (ht_c_index=0; ht_c_index<32; ht_c_index++) { sysconf.ht_c_conf_bus[ht_c_index] = 0; } @@ -1370,8 +1370,8 @@ static void sysconf_init(device_t dev) // first node sysconf.enabled_apic_ext_id = 1; } #if (CONFIG_APIC_ID_OFFSET>0) - if(sysconf.enabled_apic_ext_id) { - if(sysconf.bsp_apicid == 0) { + if (sysconf.enabled_apic_ext_id) { + if (sysconf.bsp_apicid == 0) { /* bsp apic id is not changed */ sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET; } else { @@ -1452,7 +1452,7 @@ static void cpu_bus_scan(device_t dev) nb_cfg_54 = 0; ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf); - if(ApicIdCoreIdSize) { + if (ApicIdCoreIdSize) { siblings = (1<<ApicIdCoreIdSize)-1; } else { siblings = 3; //quad core @@ -1468,10 +1468,10 @@ static void cpu_bus_scan(device_t dev) #if CONFIG_CBB dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00 - if(dev_mc && dev_mc->bus) { + if (dev_mc && dev_mc->bus) { printk(BIOS_DEBUG, "%s found", dev_path(dev_mc)); pci_domain = dev_mc->bus->dev; - if(pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) { + if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) { printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc)); dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff printk(BIOS_DEBUG, "%s",dev_path(dev_mc)); @@ -1482,17 +1482,17 @@ static void cpu_bus_scan(device_t dev) printk(BIOS_DEBUG, "\n"); } dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0)); - if(!dev_mc) { + if (!dev_mc) { dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0)); if (dev_mc && dev_mc->bus) { printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc)); pci_domain = dev_mc->bus->dev; - if(pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) { - if((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) { + if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) { + if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) { printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc)); dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc)); - while(dev_mc){ + while (dev_mc){ printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc)); dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0); printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc)); @@ -1516,8 +1516,8 @@ static void cpu_bus_scan(device_t dev) nodes = sysconf.nodes; #if CONFIG_CBB && (NODE_NUMS > 32) - if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe - if(pci_domain->link_list && !pci_domain->link_list->next) { + if (nodes>32) { // need to put node 32 to node 63 to bus 0xfe + if (pci_domain->link_list && !pci_domain->link_list->next) { struct bus *new_link = new_link(pci_domain); pci_domain->link_list->next = new_link; new_link->link_num = 1; @@ -1540,7 +1540,7 @@ static void cpu_bus_scan(device_t dev) if (disable_cu_siblings) printk(BIOS_DEBUG, "Disabling siblings on each compute unit as requested\n"); - for(i = 0; i < nodes; i++) { + for (i = 0; i < nodes; i++) { device_t cdb_dev; unsigned busn, devn; struct bus *pbus; @@ -1556,7 +1556,7 @@ static void cpu_bus_scan(device_t dev) devn = CONFIG_CDB+i; pbus = dev_mc->bus; #if CONFIG_CBB && (NODE_NUMS > 32) - if(i>=32) { + if (i>=32) { busn--; devn-=32; pbus = pci_domain->link_list->next; @@ -1570,7 +1570,7 
@@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } @@ -1630,7 +1630,7 @@ static void cpu_bus_scan(device_t dev) siblings = cores_found; u32 jj; - if(disable_siblings) { + if (disable_siblings) { jj = 0; } else { @@ -1665,7 +1665,7 @@ static void cpu_bus_scan(device_t dev) } #if CONFIG_ENABLE_APIC_EXT_ID && (CONFIG_APIC_ID_OFFSET>0) - if(sysconf.enabled_apic_ext_id) { + if (sysconf.enabled_apic_ext_id) { if (apic_id != 0 || sysconf.lift_bsp_apicid) { apic_id += sysconf.apicid_offset; } diff --git a/src/northbridge/amd/amdfam10/raminit_amdmct.c b/src/northbridge/amd/amdfam10/raminit_amdmct.c index 6166169812..6d063ab522 100644 --- a/src/northbridge/amd/amdfam10/raminit_amdmct.c +++ b/src/northbridge/amd/amdfam10/raminit_amdmct.c @@ -644,7 +644,7 @@ void mctGet_DIMMAddr(struct DCTStatStruc *pDCTstat, u32 node) struct sys_info *sysinfo = &sysinfo_car; struct mem_controller *ctrl = &( sysinfo->ctrl[node] ); - for(j=0;j<DIMM_SOCKETS;j++) { + for (j=0;j<DIMM_SOCKETS;j++) { pDCTstat->DIMMAddr[j*2] = ctrl->spd_addr[j] & 0xff; pDCTstat->DIMMAddr[j*2+1] = ctrl->spd_addr[DIMM_SOCKETS + j] & 0xff; } diff --git a/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c b/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c index 6450dd8b3c..df3850b87f 100644 --- a/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c +++ b/src/northbridge/amd/amdfam10/raminit_sysinfo_in_ram.c @@ -35,12 +35,12 @@ static u32 get_htic_bit(u8 i, u8 bit) static void wait_till_sysinfo_in_ram(void) { - while(1) { + while (1) { /* give the NB a break, many CPUs spinning on one bit makes a * lot of traffic and time is not too important to APs. 
*/ udelay(1000); - if(get_htic_bit(0, 9)) return; + if (get_htic_bit(0, 9)) return; } } #endif @@ -56,7 +56,7 @@ static void fill_mem_ctrl(u32 controllers, struct mem_controller *ctrl_a, const int j; int index = 0; struct mem_controller *ctrl; - for(i=0;i<controllers; i++) { + for (i=0;i<controllers; i++) { ctrl = &ctrl_a[i]; ctrl->node_id = i; ctrl->f0 = NODE_PCI(i, 0); @@ -66,11 +66,11 @@ static void fill_mem_ctrl(u32 controllers, struct mem_controller *ctrl_a, const ctrl->f4 = NODE_PCI(i, 4); ctrl->f5 = NODE_PCI(i, 5); - if(spd_addr == (void *)0) continue; + if (spd_addr == (void *)0) continue; ctrl->spd_switch_addr = spd_addr[index++]; - for(j=0; j < 8; j++) { + for (j=0; j < 8; j++) { ctrl->spd_addr[j] = spd_addr[index++]; } diff --git a/src/northbridge/amd/amdfam10/reset_test.c b/src/northbridge/amd/amdfam10/reset_test.c index 0ed4ffdf85..bdf70b56b2 100644 --- a/src/northbridge/amd/amdfam10/reset_test.c +++ b/src/northbridge/amd/amdfam10/reset_test.c @@ -90,7 +90,7 @@ void set_bios_reset(void) nodes = ((pci_read_config32(PCI_DEV(CONFIG_CBB, CONFIG_CDB, 0), 0x60) >> 4) & 7) + 1; - for(i = 0; i < nodes; i++) { + for (i = 0; i < nodes; i++) { dev = NODE_PCI(i,0); htic = pci_read_config32(dev, HT_INIT_CONTROL); htic &= ~HTIC_BIOSR_Detect; @@ -110,7 +110,7 @@ static u8 node_link_to_bus(u8 node, u8 link) // node are 6 bit, and link three b // put node and link in correct bit val = ((node & 0x0f)<<4) | ((node & 0x30)<< (12-4)) | ((link & 0x07)<<8) ; - for(reg = 0xE0; reg < 0xF0; reg += 0x04) { + for (reg = 0xE0; reg < 0xF0; reg += 0x04) { u32 config_map; config_map = pci_io_read_config32(PCI_DEV(CONFIG_CBB, CONFIG_CDB, 1), reg); if ((config_map & 3) != 3) { diff --git a/src/northbridge/amd/amdfam10/setup_resource_map.c b/src/northbridge/amd/amdfam10/setup_resource_map.c index cd2f71302e..a14fc3c842 100644 --- a/src/northbridge/amd/amdfam10/setup_resource_map.c +++ b/src/northbridge/amd/amdfam10/setup_resource_map.c @@ -22,7 +22,7 @@ static void setup_resource_map(const u32 *register_values, u32 max) u32 i; // printk(BIOS_DEBUG, "setting up resource map...."); - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { device_t dev; u32 where; u32 reg; @@ -42,7 +42,7 @@ void setup_resource_map_offset(const u32 *register_values, u32 max, u32 offset_p { u32 i; // printk(BIOS_DEBUG, "setting up resource map offset...."); - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { device_t dev; u32 where; unsigned long reg; @@ -68,7 +68,7 @@ void setup_resource_map_x_offset(const u32 *register_values, u32 max, u32 offset if (IS_ENABLED(RES_DEBUG)) printk(BIOS_DEBUG, "setting up resource map ex offset....\n"); - for(i = 0; i < max; i += 4) { + for (i = 0; i < max; i += 4) { if (IS_ENABLED(RES_DEBUG)) printk(BIOS_DEBUG, "%04x: %02x %08x <- & %08x | %08x\n", i/4, register_values[i], @@ -140,7 +140,7 @@ void setup_resource_map_x(const u32 *register_values, u32 max) if (IS_ENABLED(RES_DEBUG)) printk(BIOS_DEBUG, "setting up resource map ex offset....\n"); - for(i = 0; i < max; i += 4) { + for (i = 0; i < max; i += 4) { if (IS_ENABLED(RES_DEBUG)) printk(BIOS_DEBUG, "%04x: %02x %08x <- & %08x | %08x\n", i/4, register_values[i],register_values[i+1], register_values[i+2], register_values[i+3]); @@ -194,7 +194,7 @@ static void setup_iob_resource_map(const u32 *register_values, u32 max) { u32 i; - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { u32 where; u32 reg; @@ -210,7 +210,7 @@ static void setup_io_resource_map(const u32 *register_values, u32 max) { u32 i; - for(i = 0; i < 
max; i += 3) { + for (i = 0; i < max; i += 3) { u32 where; u32 reg; diff --git a/src/northbridge/amd/amdht/AsPsNb.c b/src/northbridge/amd/amdht/AsPsNb.c index 88b2e79343..e34fa4c7a3 100644 --- a/src/northbridge/amd/amdht/AsPsNb.c +++ b/src/northbridge/amd/amdht/AsPsNb.c @@ -43,38 +43,38 @@ u8 getMinNbCOF(void) numOfNode = getNumOfNodeNb(); /* go through each node for the minimum NbCOF (in multiple of CLKIN/2) */ - for(i=0; i < numOfNode; i++) + for (i=0; i < numOfNode; i++) { /* stub function for APIC ID virtualization for large MP system later */ deviceId = translateNodeIdToDeviceIdNb(i); /* read all P-state spec registers for NbDid=1 */ - for(j=0; j < 5; j++) + for (j=0; j < 5; j++) { AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_4,PS_SPEC_REG+(j*PCI_REG_LEN)), &dtemp); /*F4x1E0 + j*4 */ /* get NbDid */ - if(dtemp & NB_DID_MASK) + if (dtemp & NB_DID_MASK) nbDid = 1; } /* if F3x1FC[NbCofVidUpdate]=0, NbFid = default value */ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/ - if(!(dtemp & NB_CV_UPDATE)) /* F3x1FC[NbCofVidUpdated]=0, use default VID */ + if (!(dtemp & NB_CV_UPDATE)) /* F3x1FC[NbCofVidUpdated]=0, use default VID */ { AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,CPTC0), &dtemp); /*F3xD4*/ nextNbFid = (u8) (dtemp & BIT_MASK_5); - if(nbDid) + if (nbDid) nextNbFid = (u8) (nextNbFid >> 1); } else { /* check PVI/SPI */ AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PW_CTL_MISC), &dtemp); /*F3xA0*/ - if(dtemp & PVI_MODE) /* PVI */ + if (dtemp & PVI_MODE) /* PVI */ { AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/ nextNbFid = (u8) (dtemp >> UNI_NB_FID_BIT); nextNbFid &= BIT_MASK_5; - /* if(nbDid) + /* if (nbDid) nextNbFid = nextNbFid >> 1; */ } else /* SVI */ @@ -82,18 +82,18 @@ u8 getMinNbCOF(void) AmdPCIRead(MAKE_SBDFO(0,0,deviceId,FN_3,PRCT_INFO), &dtemp); /*F3x1FC*/ nextNbFid = (u8) ((dtemp >> UNI_NB_FID_BIT) & BIT_MASK_5); nextNbFid = (u8) (nextNbFid + ((dtemp >> SPLT_NB_FID_OFFSET) & BIT_MASK_3)); - /* if(nbDid) + /* if (nbDid) nextNbFid = nextNbFid >> 1; */ } } - if( i == 0) + if ( i == 0) nbFid = nextNbFid; - else if( nbFid > nextNbFid ) + else if ( nbFid > nextNbFid ) nbFid = nextNbFid; } /* add the base and convert to 100MHz divide by 2 if DID=1 */ - if(nbDid) + if (nbDid) nbFid = (u8) (nbFid + 4); else nbFid = (u8) ((nbFid + 4) << 1); diff --git a/src/northbridge/amd/amdk8/acpi.c b/src/northbridge/amd/amdk8/acpi.c index db521dafc0..992a85ed46 100644 --- a/src/northbridge/amd/amdk8/acpi.c +++ b/src/northbridge/amd/amdk8/acpi.c @@ -36,7 +36,7 @@ unsigned long acpi_create_madt_lapic_nmis(unsigned long current, u16 flags, u8 l device_t cpu; int cpu_index = 0; - for(cpu = all_devices; cpu; cpu = cpu->next) { + for (cpu = all_devices; cpu; cpu = cpu->next) { if ((cpu->path.type != DEVICE_PATH_APIC) || (cpu->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) { continue; @@ -55,7 +55,7 @@ unsigned long acpi_create_srat_lapics(unsigned long current) device_t cpu; int cpu_index = 0; - for(cpu = all_devices; cpu; cpu = cpu->next) { + for (cpu = all_devices; cpu; cpu = cpu->next) { if ((cpu->path.type != DEVICE_PATH_APIC) || (cpu->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) { continue; @@ -99,9 +99,9 @@ static void set_srat_mem(void *gp, struct device *dev, struct resource *res) * next range is from 1M--- * So will cut off before 1M in the mem range */ - if((basek+sizek)<1024) return; + if ((basek+sizek)<1024) return; - if(basek<1024) { + if (basek<1024) { sizek -= 1024 - basek; basek = 1024; } @@ -158,30 +158,30 @@ static unsigned long 
acpi_fill_slit(unsigned long current) p += 8; #if 0 - for(i=0;i<sysconf.hc_possible_num;i++) { - if((sysconf.pci1234[i]&1) !=1 ) continue; + for (i=0;i<sysconf.hc_possible_num;i++) { + if ((sysconf.pci1234[i]&1) !=1 ) continue; outer_node[(sysconf.pci1234[i] >> 4) & 0xf] = 1; // mark the outer node } #endif - for(i=0;i<nodes;i++) { - for(j=0;j<nodes; j++) { - if(i==j) { + for (i=0;i<nodes;i++) { + for (j=0;j<nodes; j++) { + if (i==j) { p[i*nodes+j] = 10; } else { #if 0 int k; u8 latency_factor = 0; int k_start, k_end; - if(i<j) { + if (i<j) { k_start = i; k_end = j; } else { k_start = j; k_end = i; } - for(k=k_start;k<=k_end; k++) { - if(outer_node[k]) { + for (k=k_start;k<=k_end; k++) { + if (outer_node[k]) { latency_factor = 1; break; } @@ -238,10 +238,10 @@ static void k8acpi_write_HT(void) { acpigen_write_name("HCLK"); acpigen_write_package(HC_POSSIBLE_NUM); - for(i=0;i<sysconf.hc_possible_num;i++) { + for (i=0;i<sysconf.hc_possible_num;i++) { acpigen_write_dword(sysconf.pci1234[i]); } - for(i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 + for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 acpigen_write_dword(0x0); } @@ -250,10 +250,10 @@ static void k8acpi_write_HT(void) { acpigen_write_name("HCDN"); acpigen_write_package(HC_POSSIBLE_NUM); - for(i=0;i<sysconf.hc_possible_num;i++) { + for (i=0;i<sysconf.hc_possible_num;i++) { acpigen_write_dword(sysconf.hcdn[i]); } - for(i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 + for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8 acpigen_write_dword(0x20202020); } acpigen_pop_len(); @@ -268,7 +268,7 @@ static void k8acpi_write_pci_data(int dlen, const char *name, int offset) { acpigen_write_name(name); acpigen_write_package(dlen); - for(i=0; i<dlen; i++) { + for (i=0; i<dlen; i++) { dword = pci_read_config32(dev, offset+i*4); acpigen_write_dword(dword); } diff --git a/src/northbridge/amd/amdk8/coherent_ht.c b/src/northbridge/amd/amdk8/coherent_ht.c index a7c3fc27c6..7dfc5ef13d 100644 --- a/src/northbridge/amd/amdk8/coherent_ht.c +++ b/src/northbridge/amd/amdk8/coherent_ht.c @@ -259,7 +259,7 @@ static int verify_connection(u8 dest) * remode node's vendor/device id */ val = pci_read_config32(NODE_HT(dest),0); - if(val != 0x11001022) + if (val != 0x11001022) return 0; return 1; @@ -368,18 +368,18 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui static uint8_t get_linkn_first(uint8_t byte) { - if(byte & 0x02) { byte = 0; } - else if(byte & 0x04) { byte = 1; } - else if(byte & 0x08) { byte = 2; } + if (byte & 0x02) { byte = 0; } + else if (byte & 0x04) { byte = 1; } + else if (byte & 0x08) { byte = 2; } return byte; } #if TRY_HIGH_FIRST == 1 static uint8_t get_linkn_last(uint8_t byte) { - if(byte & 0x02) { byte &= 0x0f; byte |= 0x00; } - if(byte & 0x04) { byte &= 0x0f; byte |= 0x10; } - if(byte & 0x08) { byte &= 0x0f; byte |= 0x20; } + if (byte & 0x02) { byte &= 0x0f; byte |= 0x00; } + if (byte & 0x04) { byte &= 0x0f; byte |= 0x10; } + if (byte & 0x08) { byte &= 0x0f; byte |= 0x20; } return byte>>4; } #endif @@ -388,9 +388,9 @@ static uint8_t get_linkn_last(uint8_t byte) static uint8_t get_linkn_last_count(uint8_t byte) { byte &= 0x0f; - if(byte & 0x02) { byte &= 0xcf; byte |= 0x00; byte+=0x40; } - if(byte & 0x04) { byte &= 0xcf; byte |= 0x10; byte+=0x40; } - if(byte & 0x08) { byte &= 0xcf; byte |= 0x20; 
byte+=0x40; } + if (byte & 0x02) { byte &= 0xcf; byte |= 0x00; byte+=0x40; } + if (byte & 0x04) { byte &= 0xcf; byte |= 0x10; byte+=0x40; } + if (byte & 0x08) { byte &= 0xcf; byte |= 0x20; byte+=0x40; } return byte>>4; } #endif @@ -400,7 +400,7 @@ static void setup_row_local(u8 source, u8 row) /* source will be 7 when it is fo uint8_t linkn; uint32_t val; val = 1; - for(linkn = 0; linkn<3; linkn++) { + for (linkn = 0; linkn<3; linkn++) { uint8_t regpos; uint32_t reg; regpos = 0x98 + 0x20 * linkn; @@ -420,7 +420,7 @@ static void setup_row_direct_x(u8 temp, u8 source, u8 dest, u8 linkn) val = 1<<(linkn+1); val |= 1<<(linkn+1+8); /*for direct connect response route should equal to request table*/ - if(((source &1)!=(dest &1)) + if (((source &1)!=(dest &1)) #if CROSS_BAR_47_56 && ( (source<4)||(source>5) ) //(6,7) (7,6) should still be here //(6,5) (7,4) should be here @@ -452,7 +452,7 @@ static void opt_broadcast_rt_group(const u8 *conn, int num) { int i; - for(i=0; i<num; i+=3) { + for (i=0; i<num; i+=3) { opt_broadcast_rt(conn[i], conn[i+1],conn[i+2]); } } @@ -469,7 +469,7 @@ static void opt_broadcast_rt_plus_group(const u8 *conn, int num) { int i; - for(i=0; i<num; i+=3) { + for (i=0; i<num; i+=3) { opt_broadcast_rt_plus(conn[i], conn[i+1],conn[i+2]); } } @@ -507,7 +507,7 @@ static void setup_remote_node(u8 node) printk(BIOS_SPEW, "setup_remote_node: "); /* copy the default resource map from node 0 */ - for(i = 0; i < ARRAY_SIZE(pci_reg); i++) { + for (i = 0; i < ARRAY_SIZE(pci_reg); i++) { uint32_t value; uint8_t reg; reg = pci_reg[i]; @@ -534,7 +534,7 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif #if !CROSS_BAR_47_56 u8 gateway; u8 diff; - if(source<dest) { + if (source<dest) { gateway = source + 2; } else { gateway = source - 2; @@ -551,7 +551,7 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif diff = ((source&1)!=(dest &1)); #endif - if(diff && (val_s!=(val&0xff)) ) { /* use another connect as response*/ + if (diff && (val_s!=(val&0xff)) ) { /* use another connect as response*/ val_s -= val & 0xff; #if (CONFIG_MAX_PHYSICAL_CPUS > 4) || CONFIG_MAX_PHYSICAL_CPUS_4_BUT_MORE_INSTALLED uint8_t byte; @@ -560,13 +560,13 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif */ byte = val_s; byte = get_linkn_last_count(byte); - if((byte>>2)>1) { /* make sure not the corner*/ - if(source<dest) { + if ((byte>>2)>1) { /* make sure not the corner*/ + if (source<dest) { val_s-=link_connection(temp, source-2); /* -down*/ } else { #if CROSS_BAR_47_56 #if 0 - if(source==7) { + if (source==7) { val_s-=link_connection(temp, 6); // for 7,2 via 5 } else if (source==6){ val_s-=link_connection(temp, 7); // for 6,3 via 4 @@ -584,7 +584,7 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif val |= (val_s<<8); } - if(diff) { /* cross rung?*/ + if (diff) { /* cross rung?*/ val |= (1<<16); } else { @@ -613,10 +613,10 @@ static void setup_row_indirect_group(const u8 *conn, int num) int i; #if !CROSS_BAR_47_56 - for(i=0; i<num; i+=2) { + for (i=0; i<num; i+=2) { setup_row_indirect(conn[i], conn[i+1]); #else - for(i=0; i<num; i+=4) { + for (i=0; i<num; i+=4) { setup_row_indirect(conn[i], conn[i+1],conn[i+2], conn[i+3]); #endif @@ -640,10 +640,10 @@ static void setup_remote_row_indirect_group(const u8 *conn, int num) int i; #if !CROSS_BAR_47_56 - for(i=0; i<num; i+=2) { + for (i=0; i<num; i+=2) { setup_remote_row_indirect(conn[i], conn[i+1]); #else - for(i=0; i<num; i+=4) { + for (i=0; 
i<num; i+=4) { setup_remote_row_indirect(conn[i], conn[i+1],conn[i+2], conn[i+3]); #endif } @@ -667,7 +667,7 @@ static int optimize_connection_group(const u8 *opt_conn, int num) { int needs_reset = 0; int i; - for(i=0; i<num; i+=2) { + for (i=0; i<num; i+=2) { needs_reset = optimize_connection( NODE_HT(opt_conn[i]), 0x80 + link_to_register(link_connection(opt_conn[i],opt_conn[i+1])), NODE_HT(opt_conn[i+1]), 0x80 + link_to_register(link_connection(opt_conn[i+1],opt_conn[i])) ); @@ -688,7 +688,7 @@ static unsigned setup_smp2(void) val = get_row(0,0); byte = (val>>16) & 0xfe; - if(byte<0x2) { /* no coherent connection so get out.*/ + if (byte<0x2) { /* no coherent connection so get out.*/ nodes = 1; return nodes; } @@ -716,7 +716,7 @@ static unsigned setup_smp2(void) val = get_row(7,1); byte = (val>>16) & 0xfe; byte = get_linkn_last_count(byte); - if((byte>>2)==3) { /* Oh! we need to treat it as node2. So use another link*/ + if ((byte>>2)==3) { /* Oh! we need to treat it as node2. So use another link*/ val = get_row(0,0); byte = (val>>16) & 0xfe; #if TRY_HIGH_FIRST == 1 @@ -766,7 +766,7 @@ static unsigned setup_smp4(void) byte = ((val>>16) & 0xfe) - link_connection(0,1); byte = get_linkn_last_count(byte); - if((byte>>2)==0) { /* We should have two coherent for 4p and above*/ + if ((byte>>2)==0) { /* We should have two coherent for 4p and above*/ nodes = 2; return nodes; } @@ -840,9 +840,9 @@ static unsigned setup_smp4(void) #if (CONFIG_MAX_PHYSICAL_CPUS > 4) || CONFIG_MAX_PHYSICAL_CPUS_4_BUT_MORE_INSTALLED /* We need to find out which link is to node3 */ - if((byte>>2)==2) { /* one to node3, one to node0, one to node4*/ + if ((byte>>2)==2) { /* one to node3, one to node0, one to node4*/ val = get_row(7,3); - if((val>>16) == 1) { /* that link is to node4, because via node1 it has been set, recompute it*/ + if ((val>>16) == 1) { /* that link is to node4, because via node1 it has been set, recompute it*/ val = get_row(2,2); byte = ((val>>16) & 0xfe) - link_connection(2,0); byte = get_linkn_first(byte); @@ -864,7 +864,7 @@ static unsigned setup_smp4(void) val = get_row(7,3); byte = ((val>>16) & 0xfe) - link_connection(7,2) - link_connection(7,1); byte = get_linkn_last_count(byte); - if((byte>>2)==1) { /* We should have three coherent links on node 3 for 6p and above*/ + if ((byte>>2)==1) { /* We should have three coherent links on node 3 for 6p and above*/ byte &= 3; /*bit [3,2] is count-2*/ print_linkn("(3,5) link=", byte); setup_remote_row_direct(3, 5, byte); @@ -874,7 +874,7 @@ static unsigned setup_smp4(void) byte = ((val>>16) & 0xfe) - link_connection(2,3) - link_connection(2,0); byte = get_linkn_last_count(byte); - if((byte>>2)==1) { /* We should have three coherent link on node 2 for 6p and above*/ + if ((byte>>2)==1) { /* We should have three coherent link on node 2 for 6p and above*/ byte &= 3; /* bit [3,2] is count-2*/ print_linkn("(2,4) link=", byte); setup_row_direct(2, 4, byte); @@ -937,7 +937,7 @@ static unsigned setup_smp6(void) byte = ((val>>16) & 0xfe) - link_connection(2,3) - link_connection(2,0); byte = get_linkn_last_count(byte); - if((byte>>2)==0) { /* We should have three coherent link on node 2 for 6p and above*/ + if ((byte>>2)==0) { /* We should have three coherent link on node 2 for 6p and above*/ nodes = 4; return nodes; } @@ -947,7 +947,7 @@ static unsigned setup_smp6(void) val = get_row(3,3); byte = ((val>>16) & 0xfe) - link_connection(3,2) - link_connection(3,1); byte = get_linkn_last_count(byte); - if((byte>>2)==0) { /* We should have three coherent links on node 3 
for 6p and above*/ + if ((byte>>2)==0) { /* We should have three coherent links on node 3 for 6p and above*/ nodes = 4; return nodes; } @@ -974,7 +974,7 @@ static unsigned setup_smp6(void) setup_row_indirect_group(conn6_1, ARRAY_SIZE(conn6_1)); - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte,byte+2); } verify_connection(7); @@ -1002,7 +1002,7 @@ static unsigned setup_smp6(void) enable_routing(4); setup_temp_row(0,1); - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte+1,byte+3); } verify_connection(7); @@ -1030,9 +1030,9 @@ static unsigned setup_smp6(void) #if CONFIG_MAX_PHYSICAL_CPUS > 6 /* We need to find out which link is to node5 */ - if((byte>>2)==2) { /* one to node5, one to node2, one to node6*/ + if ((byte>>2)==2) { /* one to node5, one to node2, one to node6*/ val = get_row(7,5); - if((val>>16) == 1) { /* that link is to node6, because via node 3 node 5 has been set*/ + if ((val>>16) == 1) { /* that link is to node6, because via node 3 node 5 has been set*/ val = get_row(4,4); byte = ((val>>16) & 0xfe) - link_connection(4,2); byte = get_linkn_first(byte); @@ -1053,7 +1053,7 @@ static unsigned setup_smp6(void) val = get_row(7,5); byte = ((val>>16) & 0xfe) - link_connection(7,4) - link_connection(7,3); byte = get_linkn_last_count(byte); - if((byte>>2)==1) { /* We should have three coherent links on node 5 for 6p and above*/ + if ((byte>>2)==1) { /* We should have three coherent links on node 5 for 6p and above*/ byte &= 3; /*bit [3,2] is count-2*/ print_linkn("(5,7) link=", byte); setup_remote_row_direct(5, 7, byte); @@ -1064,7 +1064,7 @@ static unsigned setup_smp6(void) byte = ((val>>16) & 0xfe) - link_connection(4,5) - link_connection(4,2); byte = get_linkn_last_count(byte); - if((byte>>2)==1) { /* We should have three coherent link on node 4 for 6p and above*/ + if ((byte>>2)==1) { /* We should have three coherent link on node 4 for 6p and above*/ byte &= 3; /* bit [3,2] is count-2*/ print_linkn("(4,6) link=", byte); setup_row_direct(4, 6, byte); @@ -1114,7 +1114,7 @@ static unsigned setup_smp6(void) /* We need to do sth about reverse about setup_temp_row (0,1), (2,4), (1, 3), (3,5) * It will be done by clear_dead_links */ - for(byte=0; byte<4; byte++) { + for (byte=0; byte<4; byte++) { clear_temp_row(byte); } #endif @@ -1142,7 +1142,7 @@ static unsigned setup_smp8(void) #else byte = ((val>>16) & 0xfe) - link_connection(4,5) - link_connection(4,2); byte = get_linkn_last_count(byte); /* Max link to 6*/ - if((byte>>2)==0) { /* We should have two or three coherent links on node 4 for 8p*/ + if ((byte>>2)==0) { /* We should have two or three coherent links on node 4 for 8p*/ nodes = 6; return nodes; } @@ -1150,7 +1150,7 @@ static unsigned setup_smp8(void) #if CROSS_BAR_47_56 byte = get_linkn_last_count(byte); /* Max link to 6*/ - if((byte>>2)<2) { /* We should have two or three coherent links on node 4 for 8p*/ + if ((byte>>2)<2) { /* We should have two or three coherent links on node 4 for 8p*/ nodes = 6; return nodes; } @@ -1169,7 +1169,7 @@ static unsigned setup_smp8(void) val = get_row(5,5); byte = ((val>>16) & 0xfe) - link_connection(5,4) - link_connection(5,3); byte = get_linkn_last_count(byte); - if((byte>>2)==0) { /* We should have three coherent links on node 5 for 6p and above*/ + if ((byte>>2)==0) { /* We should have three coherent links on node 5 for 6p and above*/ nodes = 6; return nodes; } @@ -1202,7 +1202,7 @@ static unsigned setup_smp8(void) setup_row_indirect_group(conn8_1,ARRAY_SIZE(conn8_1)); 
- for(byte=0; byte<6; byte+=2) { + for (byte=0; byte<6; byte+=2) { setup_temp_row(byte,byte+2); } verify_connection(7); @@ -1240,7 +1240,7 @@ static unsigned setup_smp8(void) setup_row_direct(5, 6, byte); setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */ - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte+1,byte+3); } setup_temp_row(5,6); @@ -1248,7 +1248,7 @@ static unsigned setup_smp8(void) verify_connection(7); val = get_row(7,6); // to chect it if it is node6 before renaming - if( (val>>16) == 1) { // it is real node 7 so swap it + if ( (val>>16) == 1) { // it is real node 7 so swap it /* We need to recompute link to 6 */ val = get_row(5,5); byte = ((val>>16) & 0xfe) - link_connection(5,3); @@ -1261,7 +1261,7 @@ static unsigned setup_smp8(void) setup_row_direct(5, 6, byte); #if 0 setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */ - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte+1,byte+3); } #endif @@ -1281,7 +1281,7 @@ static unsigned setup_smp8(void) #if !CROSS_BAR_47_56 setup_temp_row(0,1); - for(byte=0; byte<6; byte+=2) { + for (byte=0; byte<6; byte+=2) { setup_temp_row(byte+1,byte+3); } @@ -1301,7 +1301,7 @@ static unsigned setup_smp8(void) setup_row_direct(4, 7, byte); /* Setup and check temporary connection from Node 0 to Node 7 through 2, and 4*/ - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte,byte+2); } @@ -1326,7 +1326,7 @@ static unsigned setup_smp8(void) setup_row_direct(5, 7, byte); setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */ - for(byte=0; byte<4; byte+=2) { + for (byte=0; byte<4; byte+=2) { setup_temp_row(byte+1,byte+3); } @@ -1485,17 +1485,17 @@ static unsigned setup_smp(void) nodes = setup_smp2(); #if CONFIG_MAX_PHYSICAL_CPUS > 2 - if(nodes == 2) + if (nodes == 2) nodes = setup_smp4(); #endif #if CONFIG_MAX_PHYSICAL_CPUS > 4 - if(nodes == 4) + if (nodes == 4) nodes = setup_smp6(); #endif #if CONFIG_MAX_PHYSICAL_CPUS > 6 - if(nodes == 6) + if (nodes == 6) nodes = setup_smp8(); #endif @@ -1517,14 +1517,14 @@ static unsigned verify_mp_capabilities(unsigned nodes) switch(mask) { #if CONFIG_MAX_PHYSICAL_CPUS > 2 case 0x02: /* MPCap */ - if(nodes > 2) { + if (nodes > 2) { printk(BIOS_ERR, "Going back to DP\n"); return 2; } break; #endif case 0x00: /* Non SMP */ - if(nodes >1 ) { + if (nodes >1 ) { printk(BIOS_ERR, "Going back to UP\n"); return 1; } @@ -1541,22 +1541,22 @@ static void clear_dead_routes(unsigned nodes) int last_row; int node, row; #if CONFIG_MAX_PHYSICAL_CPUS == 8 - if(nodes==8) return;/* don't touch (7,7)*/ + if (nodes==8) return;/* don't touch (7,7)*/ #endif last_row = nodes; if (nodes == 1) { last_row = 0; } - for(node = 7; node >= 0; node--) { - for(row = 7; row >= last_row; row--) { + for (node = 7; node >= 0; node--) { + for (row = 7; row >= last_row; row--) { fill_row(node, row, DEFAULT); } } /* Update the local row */ - for( node=0; node<nodes; node++) { + for ( node=0; node<nodes; node++) { uint32_t val = 0; - for(row =0; row<nodes; row++) { + for (row =0; row<nodes; row++) { val |= get_row(node, row); } fill_row(node, node, (((val & 0xff) | ((val >> 8) & 0xff)) << 16) | 0x0101); @@ -1651,7 +1651,7 @@ static int apply_cpu_errata_fixes(unsigned nodes) { unsigned node; int needs_reset = 0; - for(node = 0; node < nodes; node++) { + for (node = 0; node < nodes; node++) { device_t dev; uint32_t cmd; dev = NODE_MC(node); @@ -1692,7 +1692,7 @@ static int apply_cpu_errata_fixes(unsigned nodes) */ cmd_ref = 
0x04e20707; /* Registered */ cmd = pci_read_config32(dev, 0xd4); - if(cmd != cmd_ref) { + if (cmd != cmd_ref) { pci_write_config32(dev, 0xd4, cmd_ref ); needs_reset = 1; /* Needed? */ } @@ -1721,14 +1721,14 @@ static int optimize_link_read_pointers(unsigned nodes) { unsigned node; int needs_reset = 0; - for(node = 0; node < nodes; node++) { + for (node = 0; node < nodes; node++) { device_t f0_dev, f3_dev; uint32_t cmd_ref, cmd; int link; f0_dev = NODE_HT(node); f3_dev = NODE_MC(node); cmd_ref = cmd = pci_read_config32(f3_dev, 0xdc); - for(link = 0; link < 3; link++) { + for (link = 0; link < 3; link++) { uint32_t link_type; unsigned reg; /* This works on an Athlon64 because unimplemented links return 0 */ @@ -1764,14 +1764,14 @@ static int optimize_link_coherent_ht(void) nodes = get_nodes(); #if CONFIG_MAX_PHYSICAL_CPUS > 1 - if(nodes>1) { + if (nodes>1) { needs_reset |= optimize_connection( NODE_HT(0), 0x80 + link_to_register(link_connection(0,1)), NODE_HT(1), 0x80 + link_to_register(link_connection(1,0)) ); } #if CONFIG_MAX_PHYSICAL_CPUS > 2 - if(nodes>2) { + if (nodes>2) { /* optimize physical connections - by LYH */ static const u8 opt_conn4[] = { 0,2, @@ -1783,7 +1783,7 @@ static int optimize_link_coherent_ht(void) #endif #if CONFIG_MAX_PHYSICAL_CPUS > 4 - if(nodes>4) { + if (nodes>4) { static const uint8_t opt_conn6[] ={ 2, 4, 3, 5, @@ -1796,7 +1796,7 @@ static int optimize_link_coherent_ht(void) #endif #if CONFIG_MAX_PHYSICAL_CPUS > 6 - if(nodes>6) { + if (nodes>6) { static const uint8_t opt_conn8[] ={ 4, 6, #if CROSS_BAR_47_56 diff --git a/src/northbridge/amd/amdk8/debug.c b/src/northbridge/amd/amdk8/debug.c index c1021e5736..35353bd64a 100644 --- a/src/northbridge/amd/amdk8/debug.c +++ b/src/northbridge/amd/amdk8/debug.c @@ -19,7 +19,7 @@ static void print_debug_pci_dev(unsigned dev) static inline void print_pci_devices(void) { device_t dev; - for(dev = PCI_DEV(0, 0, 0); + for (dev = PCI_DEV(0, 0, 0); dev <= PCI_DEV(0xff, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { uint32_t id; @@ -31,10 +31,10 @@ static inline void print_pci_devices(void) } print_debug_pci_dev(dev); printk(BIOS_DEBUG, " %04x:%04x\n", (id & 0xffff), (id>>16)); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { uint8_t hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -46,7 +46,7 @@ static void dump_pci_device(unsigned dev) int i; print_debug_pci_dev(dev); - for(i = 0; i < 256; i++) { + for (i = 0; i < 256; i++) { unsigned char val; if ((i & 0x0f) == 0) { printk(BIOS_DEBUG, "\n%02x:",i); @@ -65,12 +65,12 @@ static inline void dump_pci_device_index_wait(unsigned dev, uint32_t index_reg) print_debug_pci_dev(dev); printk(BIOS_DEBUG, " -- index_reg=%08x", index_reg); - for(i = 0; i < 0x40; i++) { + for (i = 0; i < 0x40; i++) { uint32_t val; int j; printk(BIOS_DEBUG, "\n%02x:",i); val = pci_read_config32_index_wait(dev, index_reg, i); - for(j=0;j<4;j++) { + for (j=0;j<4;j++) { printk(BIOS_DEBUG, " %02x", val & 0xff); val >>= 8; } @@ -83,7 +83,7 @@ static inline void dump_pci_device_index_wait(unsigned dev, uint32_t index_reg) static inline void dump_pci_devices(void) { device_t dev; - for(dev = PCI_DEV(0, 0, 0); + for (dev = PCI_DEV(0, 0, 0); dev <= PCI_DEV(0xff, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { uint32_t id; @@ -95,10 +95,10 @@ static inline void dump_pci_devices(void) } dump_pci_device(dev); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { uint8_t hdr_type; hdr_type = 
pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -108,7 +108,7 @@ static inline void dump_pci_devices(void) static inline void dump_pci_devices_on_bus(unsigned busn) { device_t dev; - for(dev = PCI_DEV(busn, 0, 0); + for (dev = PCI_DEV(busn, 0, 0); dev <= PCI_DEV(busn, 0x1f, 0x7); dev += PCI_DEV(0,0,1)) { uint32_t id; @@ -120,10 +120,10 @@ static inline void dump_pci_devices_on_bus(unsigned busn) } dump_pci_device(dev); - if(((dev>>12) & 0x07) == 0) { + if (((dev>>12) & 0x07) == 0) { uint8_t hdr_type; hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE); - if((hdr_type & 0x80) != 0x80) { + if ((hdr_type & 0x80) != 0x80) { dev += PCI_DEV(0,0,7); } } @@ -136,13 +136,13 @@ static void dump_spd_registers(const struct mem_controller *ctrl) { int i; printk(BIOS_DEBUG, "\n"); - for(i = 0; i < 4; i++) { + for (i = 0; i < 4; i++) { unsigned device; device = ctrl->channel0[i]; if (device) { int j; printk(BIOS_DEBUG, "dimm: %02x.0: %02x", i, device); - for(j = 0; j < 128; j++) { + for (j = 0; j < 128; j++) { int status; unsigned char byte; if ((j & 0xf) == 0) { @@ -161,7 +161,7 @@ static void dump_spd_registers(const struct mem_controller *ctrl) if (device) { int j; printk(BIOS_DEBUG, "dimm: %02x.1: %02x", i, device); - for(j = 0; j < 128; j++) { + for (j = 0; j < 128; j++) { int status; unsigned char byte; if ((j & 0xf) == 0) { @@ -182,11 +182,11 @@ static void dump_smbus_registers(void) { unsigned device; printk(BIOS_DEBUG, "\n"); - for(device = 1; device < 0x80; device++) { + for (device = 1; device < 0x80; device++) { int j; - if( smbus_read_byte(device, 0) < 0 ) continue; + if ( smbus_read_byte(device, 0) < 0 ) continue; printk(BIOS_DEBUG, "smbus: %02x", device); - for(j = 0; j < 256; j++) { + for (j = 0; j < 256; j++) { int status; unsigned char byte; status = smbus_read_byte(device, j); @@ -210,7 +210,7 @@ static inline void dump_io_resources(unsigned port) int i; udelay(2000); printk(BIOS_DEBUG, "%04x:\n", port); - for(i=0;i<256;i++) { + for (i=0;i<256;i++) { uint8_t val; if ((i & 0x0f) == 0) { printk(BIOS_DEBUG, "%02x:", i); @@ -228,8 +228,8 @@ static inline void dump_mem(unsigned start, unsigned end) { unsigned i; printk(BIOS_DEBUG, "dump_mem:"); - for(i=start;i<end;i++) { - if((i & 0xf)==0) { + for (i=start;i<end;i++) { + if ((i & 0xf)==0) { printk(BIOS_DEBUG, "\n%08x:", i); } printk(BIOS_DEBUG, " %02x", (unsigned char)*((unsigned char *)i)); diff --git a/src/northbridge/amd/amdk8/early_ht.c b/src/northbridge/amd/amdk8/early_ht.c index 1bc34e42f1..8f6766d29a 100644 --- a/src/northbridge/amd/amdk8/early_ht.c +++ b/src/northbridge/amd/amdk8/early_ht.c @@ -47,7 +47,7 @@ static void enumerate_ht_chain(void) { pos = pci_read_config8(dev, PCI_CAPABILITY_LIST); } - while(pos != 0) { + while (pos != 0) { uint8_t cap; cap = pci_read_config8(dev, pos + PCI_CAP_LIST_ID); if (cap == PCI_CAP_ID_HT) { @@ -64,8 +64,8 @@ static void enumerate_ht_chain(void) device_t devx; #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - if(next_unitid>=0x18) { // don't get mask out by k8, at this time BSP, RT is not enabled, it will response from 0x18,0--0x1f. - if(!end_used) { + if (next_unitid>=0x18) { // don't get mask out by k8, at this time BSP, RT is not enabled, it will response from 0x18,0--0x1f. 
+ if (!end_used) { next_unitid = CONFIG_HT_CHAIN_END_UNITID_BASE; end_used = 1; } else { @@ -114,20 +114,20 @@ static void enumerate_ht_chain(void) break; } } - } while((ctrl & (1 << 5)) == 0); + } while ((ctrl & (1 << 5)) == 0); break; } } pos = pci_read_config8(dev, pos + PCI_CAP_LIST_NEXT); } - } while(last_unitid != next_unitid); + } while (last_unitid != next_unitid); out: ; #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - if((ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) { + if ((ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) { uint16_t flags; dev = PCI_DEV(0,real_last_unitid, 0); flags = pci_read_config16(dev, real_last_pos + PCI_CAP_FLAGS); diff --git a/src/northbridge/amd/amdk8/f.h b/src/northbridge/amd/amdk8/f.h index f83282bfeb..a449c3412e 100644 --- a/src/northbridge/amd/amdk8/f.h +++ b/src/northbridge/amd/amdk8/f.h @@ -535,25 +535,25 @@ static inline void wait_all_core0_mem_trained(struct sys_info *sysinfo) unsigned needs_reset = 0; - if(sysinfo->nodes == 1) return; // in case only one CPU installed + if (sysinfo->nodes == 1) return; // in case only one CPU installed - for(i=1; i<sysinfo->nodes; i++) { + for (i=1; i<sysinfo->nodes; i++) { /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->mem_trained[i]==0x00) continue; + if (sysinfo->mem_trained[i]==0x00) continue; mask |= (1<<i); } i = 1; - while(1) { - if(mask & (1<<i)) { - if((sysinfo->mem_trained[i])!=0x80) { + while (1) { + if (mask & (1<<i)) { + if ((sysinfo->mem_trained[i])!=0x80) { mask &= ~(1<<i); } } - if(!mask) break; + if (!mask) break; #if 0 /* cpu_relax */ @@ -564,7 +564,7 @@ static inline void wait_all_core0_mem_trained(struct sys_info *sysinfo) i%=sysinfo->nodes; } - for(i=0; i<sysinfo->nodes; i++) { + for (i=0; i<sysinfo->nodes; i++) { printk(BIOS_DEBUG, "mem_trained[%02x]=%02x\n", i, sysinfo->mem_trained[i]); switch(sysinfo->mem_trained[i]) { case 0: //don't need train @@ -577,7 +577,7 @@ static inline void wait_all_core0_mem_trained(struct sys_info *sysinfo) break; } } - if(needs_reset) { + if (needs_reset) { printk(BIOS_DEBUG, "mem trained failed\n"); #ifdef __PRE_RAM__ soft_reset(); diff --git a/src/northbridge/amd/amdk8/get_sblk_pci1234.c b/src/northbridge/amd/amdk8/get_sblk_pci1234.c index 3818dd11dd..9cf40830e4 100644 --- a/src/northbridge/amd/amdk8/get_sblk_pci1234.c +++ b/src/northbridge/amd/amdk8/get_sblk_pci1234.c @@ -38,7 +38,7 @@ unsigned node_link_to_bus(unsigned node, unsigned link) if (!dev) { return 0; } - for(reg = 0xE0; reg < 0xF0; reg += 0x04) { + for (reg = 0xE0; reg < 0xF0; reg += 0x04) { uint32_t config_map; unsigned dst_node; unsigned dst_link; @@ -211,22 +211,22 @@ void get_sblk_pci1234(void) dev = dev_find_slot(0, PCI_DEVFN(0x18, 1)); - for(j=0;j<4;j++) { + for (j=0;j<4;j++) { uint32_t dwordx; dwordx = pci_read_config32(dev, 0xe0 + j*4); dwordx &=0xffff0ff1; /* keep bus num, node_id, link_num, enable bits */ - if((dwordx & 0xff1) == dword) { /* SBLINK */ + if ((dwordx & 0xff1) == dword) { /* SBLINK */ sysconf.pci1234[0] = dwordx; sysconf.hcdn[0] = sysconf.hcdn_reg[j]; continue; } - if((dwordx & 1) == 1) { + if ((dwordx & 1) == 1) { /* We need to find out the number of HC * for exact match */ - for(i=1;i<sysconf.hc_possible_num;i++) { - if((dwordx & 0xff0) == (sysconf.pci1234[i] & 0xff0)) { + for (i=1;i<sysconf.hc_possible_num;i++) { + if ((dwordx & 0xff0) == (sysconf.pci1234[i] & 0xff0)) { sysconf.pci1234[i] = dwordx; sysconf.hcdn[i] = sysconf.hcdn_reg[j]; break; @@ -234,8 +234,8 
@@ void get_sblk_pci1234(void) } /* For 0xff0 match or same node */ - for(i=1;i<sysconf.hc_possible_num;i++) { - if((dwordx & 0xff0) == (dwordx & sysconf.pci1234[i] & 0xff0)) { + for (i=1;i<sysconf.hc_possible_num;i++) { + if ((dwordx & 0xff0) == (dwordx & sysconf.pci1234[i] & 0xff0)) { sysconf.pci1234[i] = dwordx; sysconf.hcdn[i] = sysconf.hcdn_reg[j]; break; @@ -244,8 +244,8 @@ void get_sblk_pci1234(void) } } - for(i=1;i<sysconf.hc_possible_num;i++) { - if((sysconf.pci1234[i] & 1) != 1) { + for (i=1;i<sysconf.hc_possible_num;i++) { + if ((sysconf.pci1234[i] & 1) != 1) { sysconf.pci1234[i] = 0; sysconf.hcdn[i] = 0x20202020; } diff --git a/src/northbridge/amd/amdk8/incoherent_ht.c b/src/northbridge/amd/amdk8/incoherent_ht.c index 12b8290dd1..de2a4b3bb1 100644 --- a/src/northbridge/amd/amdk8/incoherent_ht.c +++ b/src/northbridge/amd/amdk8/incoherent_ht.c @@ -34,7 +34,7 @@ static uint8_t ht_lookup_capability(device_t dev, uint16_t val) if (pos > PCI_CAP_LIST_NEXT) { pos = pci_read_config8(dev, pos); } - while(pos != 0) { /* loop through the linked list */ + while (pos != 0) { /* loop through the linked list */ uint8_t cap; cap = pci_read_config8(dev, pos + PCI_CAP_LIST_ID); if (cap == PCI_CAP_ID_HT) { @@ -69,13 +69,13 @@ static void ht_collapse_previous_enumeration(uint8_t bus, unsigned offset_unitid //actually, only for one HT device HT chain, and unitid is 0 #if !CONFIG_HT_CHAIN_UNITID_BASE - if(offset_unitid) { + if (offset_unitid) { return; } #endif /* Check if is already collapsed */ - if((!offset_unitid) || (offset_unitid && (!((CONFIG_HT_CHAIN_END_UNITID_BASE == 0) && (CONFIG_HT_CHAIN_END_UNITID_BASE <CONFIG_HT_CHAIN_UNITID_BASE))))) { + if ((!offset_unitid) || (offset_unitid && (!((CONFIG_HT_CHAIN_END_UNITID_BASE == 0) && (CONFIG_HT_CHAIN_END_UNITID_BASE <CONFIG_HT_CHAIN_UNITID_BASE))))) { uint32_t id; dev = PCI_DEV(bus, 0, 0); id = pci_read_config32(dev, PCI_VENDOR_ID); @@ -88,7 +88,7 @@ static void ht_collapse_previous_enumeration(uint8_t bus, unsigned offset_unitid /* Spin through the devices and collapse any previous * hypertransport enumeration. */ - for(dev = PCI_DEV(bus, 1, 0); dev <= PCI_DEV(bus, 0x1f, 0x7); dev += PCI_DEV(0, 1, 0)) { + for (dev = PCI_DEV(bus, 1, 0); dev <= PCI_DEV(bus, 0x1f, 0x7); dev += PCI_DEV(0, 1, 0)) { uint32_t id; uint8_t pos; uint16_t flags; @@ -167,7 +167,7 @@ static uint8_t ht_read_width_cap(device_t dev, uint8_t pos) /* netlogic micro cap doesn't support 16 bit yet */ if (id == (0x184e | (0x0001 << 16))) { - if((width_cap & 0x77) == 0x11) { + if ((width_cap & 0x77) == 0x11) { width_cap &= 0x88; } } @@ -346,7 +346,7 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus, unsigned of break; } } - } while((ctrl & (1 << 5)) == 0); + } while ((ctrl & (1 << 5)) == 0); device_t dev = PCI_DEV(bus, 0, 0); last_unitid = next_unitid; @@ -371,9 +371,9 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus, unsigned of #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - if(offset_unitid) { - if(next_unitid>= (bus ? 0x20:0x18) ) { - if(!end_used) { + if (offset_unitid) { + if (next_unitid>= (bus ? 
0x20:0x18) ) { + if (!end_used) { next_unitid = CONFIG_HT_CHAIN_END_UNITID_BASE; end_used = 1; } else { @@ -436,7 +436,7 @@ out: end_of_chain: ; #if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20 - if(offset_unitid && (ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used ) { + if (offset_unitid && (ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used ) { uint16_t flags; flags = pci_read_config16(PCI_DEV(bus,real_last_unitid,0), real_last_pos + PCI_CAP_FLAGS); flags &= ~0x1f; @@ -446,14 +446,14 @@ end_of_chain: ; #if CONFIG_RAMINIT_SYSINFO // Here need to change the dev in the array int i; - for(i=0;i<sysinfo->link_pair_num;i++) + for (i=0;i<sysinfo->link_pair_num;i++) { struct link_pair_st *link_pair = &sysinfo->link_pair[i]; - if(link_pair->udev == PCI_DEV(bus, real_last_unitid, 0)) { + if (link_pair->udev == PCI_DEV(bus, real_last_unitid, 0)) { link_pair->udev = PCI_DEV(bus, CONFIG_HT_CHAIN_END_UNITID_BASE, 0); continue; } - if(link_pair->dev == PCI_DEV(bus, real_last_unitid, 0)) { + if (link_pair->dev == PCI_DEV(bus, real_last_unitid, 0)) { link_pair->dev = PCI_DEV(bus, CONFIG_HT_CHAIN_END_UNITID_BASE, 0); } } @@ -605,13 +605,13 @@ static int set_ht_link_buffer_counts_chain(uint8_t ht_c_num, unsigned vendorid, unsigned devn; reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4); - if((reg & 3) != 3) continue; // not enabled + if ((reg & 3) != 3) continue; // not enabled nodeid = ((reg & 0xf0)>>4); // nodeid linkn = ((reg & 0xf00)>>8); // link n busn = (reg & 0xff0000)>>16; //busn - for(devn = 0; devn < 0x20; devn++) { + for (devn = 0; devn < 0x20; devn++) { reg = pci_read_config32( PCI_DEV(busn, devn, 0), PCI_VENDOR_ID); //1? if ( (reg & 0xffff) == vendorid ) { reset_needed |= set_ht_link_buffer_count(nodeid, linkn, 0x07,val); @@ -731,7 +731,7 @@ static int ht_setup_chains_x(void) #endif /* clean others */ - for(ht_c_num=1;ht_c_num<4; ht_c_num++) { + for (ht_c_num=1;ht_c_num<4; ht_c_num++) { pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, 0); #if CONFIG_K8_ALLOCATE_IO_RANGE @@ -741,11 +741,11 @@ static int ht_setup_chains_x(void) #endif } - for(nodeid=0; nodeid<nodes; nodeid++) { + for (nodeid=0; nodeid<nodes; nodeid++) { device_t dev; uint8_t linkn; dev = PCI_DEV(0, 0x18+nodeid,0); - for(linkn = 0; linkn<3; linkn++) { + for (linkn = 0; linkn<3; linkn++) { unsigned regpos; regpos = 0x98 + 0x20 * linkn; reg = pci_read_config32(dev, regpos); @@ -753,15 +753,15 @@ static int ht_setup_chains_x(void) print_linkn_in("NC node|link=", ((nodeid & 0xf)<<4)|(linkn & 0xf)); tempreg = 3 | (nodeid <<4) | (linkn<<8); /*compare (temp & 0xffff), with (PCI(0, 0x18, 1) 0xe0 to 0xec & 0xfffff) */ - for(ht_c_num=0;ht_c_num<4; ht_c_num++) { + for (ht_c_num=0;ht_c_num<4; ht_c_num++) { reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4); - if(((reg & 0xffff) == (tempreg & 0xffff)) || ((reg & 0xffff) == 0x0000)) { /*we got it*/ + if (((reg & 0xffff) == (tempreg & 0xffff)) || ((reg & 0xffff) == 0x0000)) { /*we got it*/ break; } } - if(ht_c_num == 4) break; /*used up only 4 non conherent allowed*/ + if (ht_c_num == 4) break; /*used up only 4 non conherent allowed*/ /*update to 0xe0...*/ - if((reg & 0xf) == 3) continue; /*SbLink so don't touch it */ + if ((reg & 0xf) == 3) continue; /*SbLink so don't touch it */ print_linkn_in("\tbusn=", next_busn); tempreg |= (next_busn<<16)|((next_busn+0x3f)<<24); pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, tempreg); @@ -780,11 +780,11 @@ static int ht_setup_chains_x(void) } 
/*update 0xe0, 0xe4, 0xe8, 0xec from PCI_DEV(0, 0x18,1) to PCI_DEV(0, 0x19,1) to PCI_DEV(0, 0x1f,1);*/ - for(nodeid = 1; nodeid<nodes; nodeid++) { + for (nodeid = 1; nodeid<nodes; nodeid++) { int i; device_t dev; dev = PCI_DEV(0, 0x18+nodeid,1); - for(i = 0; i< 4; i++) { + for (i = 0; i< 4; i++) { unsigned regpos; regpos = 0xe0 + i * 4; reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos); @@ -793,13 +793,13 @@ static int ht_setup_chains_x(void) #if CONFIG_K8_ALLOCATE_IO_RANGE /* io range allocation */ - for(i = 0; i< 4; i++) { + for (i = 0; i< 4; i++) { unsigned regpos; regpos = 0xc4 + i * 8; reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos); pci_write_config32(dev, regpos, reg); } - for(i = 0; i< 4; i++) { + for (i = 0; i< 4; i++) { unsigned regpos; regpos = 0xc0 + i * 8; reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos); @@ -810,9 +810,9 @@ static int ht_setup_chains_x(void) /* recount ht_c_num*/ uint8_t i=0; - for(ht_c_num=0;ht_c_num<4; ht_c_num++) { + for (ht_c_num=0;ht_c_num<4; ht_c_num++) { reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4); - if(((reg & 0xf) != 0x0)) { + if (((reg & 0xf) != 0x0)) { i++; } } @@ -838,7 +838,7 @@ static int optimize_link_incoherent_ht(struct sys_info *sysinfo) printk(BIOS_SPEW, "entering optimize_link_incoherent_ht\n"); printk(BIOS_SPEW, "sysinfo->link_pair_num=0x%x\n", link_pair_num); - for(i=0; i< link_pair_num; i++) { + for (i=0; i< link_pair_num; i++) { struct link_pair_st *link_pair= &sysinfo->link_pair[i]; reset_needed |= ht_optimize_link(link_pair->udev, link_pair->upos, link_pair->uoffs, link_pair->dev, link_pair->pos, link_pair->offs); printk(BIOS_SPEW, "after ht_optimize_link for link pair %d, reset_needed=0x%x\n", i, reset_needed); diff --git a/src/northbridge/amd/amdk8/misc_control.c b/src/northbridge/amd/amdk8/misc_control.c index 9b521eef79..a2b4651a4a 100644 --- a/src/northbridge/amd/amdk8/misc_control.c +++ b/src/northbridge/amd/amdk8/misc_control.c @@ -81,7 +81,7 @@ static void set_agp_aperture(device_t dev) /* Update the other northbridges */ pdev = 0; - while((pdev = dev_find_device(PCI_VENDOR_ID_AMD, 0x1103, pdev))) { + while ((pdev = dev_find_device(PCI_VENDOR_ID_AMD, 0x1103, pdev))) { /* Store the GART size but don't enable it */ pci_write_config32(pdev, 0x90, gart_acr); @@ -160,7 +160,7 @@ static void misc_control_init(struct device *dev) needs_reset = 1; /* Needed? */ } } - else if(is_cpu_pre_d0()) { + else if (is_cpu_pre_d0()) { struct device *f2_dev; uint32_t dcl; f2_dev = dev_find_slot(0, dev->path.pci.devfn - 3 + 2); @@ -174,7 +174,7 @@ static void misc_control_init(struct device *dev) cmd_ref = 0x000D0701; /* Unbuffered */ } cmd = pci_read_config32(dev, 0xd4); - if(cmd != cmd_ref) { + if (cmd != cmd_ref) { pci_write_config32(dev, 0xd4, cmd_ref ); needs_reset = 1; /* Needed? 
*/ } @@ -185,7 +185,7 @@ static void misc_control_init(struct device *dev) if (f0_dev) { int link; cmd_ref = cmd = pci_read_config32(dev, 0xdc); - for(link = 0; link < 3; link++) { + for (link = 0; link < 3; link++) { uint32_t link_type; unsigned reg; /* This works on an Athlon64 because unimplemented links return 0 */ diff --git a/src/northbridge/amd/amdk8/northbridge.c b/src/northbridge/amd/amdk8/northbridge.c index c4fd3fdcee..d80c565d78 100644 --- a/src/northbridge/amd/amdk8/northbridge.c +++ b/src/northbridge/amd/amdk8/northbridge.c @@ -47,7 +47,7 @@ static unsigned fx_devs=0; static void get_fx_devs(void) { int i; - for(i = 0; i < MAX_FX_DEVS; i++) { + for (i = 0; i < MAX_FX_DEVS; i++) { __f0_dev[i] = dev_find_slot(0, PCI_DEVFN(0x18 + i, 0)); __f1_dev[i] = dev_find_slot(0, PCI_DEVFN(0x18 + i, 1)); if (__f0_dev[i] != NULL && __f1_dev[i] != NULL) @@ -70,7 +70,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -144,7 +144,7 @@ static void amdk8_scan_chain(struct bus *link) * register in function 1. */ free_reg = 0; - for(config_reg = 0xe0; config_reg <= 0xec; config_reg += 4) { + for (config_reg = 0xe0; config_reg <= 0xec; config_reg += 4) { u32 config; config = f1_read_config32(config_reg); if (!free_reg && ((config & 3) == 0)) { @@ -265,12 +265,12 @@ static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid, unsigned nodeid, link = 0; int result; res = 0; - for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { + for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) { device_t dev; dev = __f0_dev[nodeid]; if (!dev) continue; - for(link = 0; !res && (link < 3); link++) { + for (link = 0; !res && (link < 3); link++) { res = probe_resource(dev, IOINDEX(0x100 + reg, link)); } } @@ -293,7 +293,7 @@ static unsigned amdk8_find_reg(device_t dev, unsigned nodeid, unsigned link, unsigned free_reg, reg; resource = 0; free_reg = 0; - for(reg = min; reg <= max; reg += 0x8) { + for (reg = min; reg <= max; reg += 0x8) { int result; result = reg_useable(reg, dev, nodeid, link); if (result == 1) { @@ -370,7 +370,7 @@ static void amdk8_read_resources(device_t dev) unsigned nodeid; struct bus *link; nodeid = amdk8_nodeid(dev); - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { amdk8_link_read_bases(dev, nodeid, link->link_num); } @@ -489,7 +489,7 @@ static void amdk8_create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d link bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -505,7 +505,7 @@ static void amdk8_create_vga_resource(device_t dev, unsigned nodeid) /* allocate a temp resource for the legacy VGA buffer */ resource = new_resource(dev, IOINDEX(4, link->link_num)); - if(!resource){ + if (!resource){ printk(BIOS_DEBUG, "VGA: %s out of resources.\n", dev_path(dev)); return; } @@ -526,7 +526,7 @@ static void amdk8_set_resources(device_t dev) nodeid = amdk8_nodeid(dev); /* Set each resource we have found */ - for(res = dev->resource_list; res; res = res->next) { + for (res = dev->resource_list; res; res = res->next) { struct 
resource *old = NULL; unsigned index; @@ -554,7 +554,7 @@ static void amdk8_set_resources(device_t dev) compact_resources(dev); - for(bus = dev->link_list; bus; bus = bus->next) { + for (bus = dev->link_list; bus; bus = bus->next) { if (bus->children) { assign_resources(bus); } @@ -609,7 +609,7 @@ static void amdk8_domain_read_resources(device_t dev) /* Find the already assigned resource pairs */ get_fx_devs(); - for(reg = 0x80; reg <= 0xd8; reg+= 0x08) { + for (reg = 0x80; reg <= 0xd8; reg+= 0x08) { u32 base, limit; base = f1_read_config32(reg); limit = f1_read_config32(reg + 0x04); @@ -685,7 +685,7 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) } hole = pci_read_config32(__f1_dev[i], 0xf0); - if(hole & 1) { // we find the hole + if (hole & 1) { // we find the hole mem_hole.hole_startk = (hole & (0xff<<24)) >> 10; mem_hole.node_id = i; // record the node No with hole break; // only one hole @@ -695,9 +695,9 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) /* We need to double check if there is special set on base reg and limit reg * are not continuous instead of hole, it will find out its hole_startk. */ - if(mem_hole.node_id==-1) { + if (mem_hole.node_id==-1) { u32 limitk_pri = 0; - for(i=0; i<8; i++) { + for (i=0; i<8; i++) { u32 base, limit; unsigned base_k, limit_k; base = f1_read_config32(0x40 + (i << 3)); @@ -706,7 +706,7 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void) } base_k = (base & 0xffff0000) >> 2; - if(limitk_pri != base_k) { // we find the hole + if (limitk_pri != base_k) { // we find the hole mem_hole.hole_startk = limitk_pri; mem_hole.node_id = i; break; //only one hole @@ -738,7 +738,7 @@ static void disable_hoist_memory(unsigned long hole_startk, int node_id) hole_sizek = (4*1024*1024) - hole_startk; - for(i=7;i>node_id;i--) { + for (i=7;i>node_id;i--) { base = f1_read_config32(0x40 + (i << 3)); if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) { @@ -756,7 +756,7 @@ static void disable_hoist_memory(unsigned long hole_startk, int node_id) return; } hoist = pci_read_config32(dev, 0xf0); - if(hoist & 1) { + if (hoist & 1) { pci_write_config32(dev, 0xf0, 0); } else { base = pci_read_config32(dev, 0x40 + (node_id << 3)); @@ -775,7 +775,7 @@ static u32 hoist_memory(unsigned long hole_startk, int node_id) carry_over = (4*1024*1024) - hole_startk; - for(i=7;i>node_id;i--) { + for (i=7;i>node_id;i--) { base = f1_read_config32(0x40 + (i << 3)); if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) { @@ -790,7 +790,7 @@ static u32 hoist_memory(unsigned long hole_startk, int node_id) dev = __f1_dev[node_id]; base = pci_read_config32(dev, 0x40 + (node_id << 3)); basek = (base & 0xffff0000) >> 2; - if(basek == hole_startk) { + if (basek == hole_startk) { //don't need set memhole here, because hole off set will be 0, overflow //so need to change base reg instead, new basek will be 4*1024*1024 base &= 0x0000ffff; @@ -904,8 +904,8 @@ static void amdk8_domain_set_resources(device_t dev) //mmio_basek = 3*1024*1024; // for debug to meet boundary - if(reset_memhole) { - if(mem_hole.node_id!=-1) { // We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not make hole_startk to some basek too....! + if (reset_memhole) { + if (mem_hole.node_id!=-1) { // We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not make hole_startk to some basek too....! 
// We need to reset our Mem Hole, because We want more big HOLE than we already set //Before that We need to disable mem hole at first, becase memhole could already be set on i+1 instead disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id); @@ -923,7 +923,7 @@ static void amdk8_domain_set_resources(device_t dev) } basek = (base & 0xffff0000) >> 2; - if(mmio_basek == basek) { + if (mmio_basek == basek) { mmio_basek -= (basek - basek_pri)>>1; // increase mem hole size to make sure it is on middle of pri node break; } @@ -939,7 +939,7 @@ static void amdk8_domain_set_resources(device_t dev) #endif idx = 0x10; - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { u32 base, limit; u32 basek, limitk, sizek; base = f1_read_config32(0x40 + (i << 3)); @@ -974,7 +974,7 @@ static void amdk8_domain_set_resources(device_t dev) if (basek <= mmio_basek) { unsigned pre_sizek; pre_sizek = mmio_basek - basek; - if(pre_sizek>0) { + if (pre_sizek>0) { ram_resource(dev, (idx | i), basek, pre_sizek); idx += 0x10; sizek -= pre_sizek; @@ -982,9 +982,9 @@ static void amdk8_domain_set_resources(device_t dev) ramtop = mmio_basek * 1024; } #if CONFIG_HW_MEM_HOLE_SIZEK != 0 - if(reset_memhole) + if (reset_memhole) #if !CONFIG_K8_REV_F_SUPPORT - if(!is_cpu_pre_e0() ) + if (!is_cpu_pre_e0() ) #endif sizek += hoist_memory(mmio_basek,i); #endif @@ -1025,7 +1025,7 @@ static void amdk8_domain_scan_bus(device_t dev) struct bus *link = dev->link_list; /* Unmap all of the HT chains */ - for(reg = 0xe0; reg <= 0xec; reg += 4) { + for (reg = 0xe0; reg <= 0xec; reg += 4) { f1_write_config32(reg, 0); } @@ -1037,7 +1037,7 @@ static void amdk8_domain_scan_bus(device_t dev) * Including enabling relaxed ordering if it is safe. */ get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t f0_dev; f0_dev = __f0_dev[i]; if (f0_dev && f0_dev->enabled) { @@ -1158,7 +1158,7 @@ static void cpu_bus_scan(device_t dev) if (pci_read_config32(dev_mc, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST)) { sysconf.enabled_apic_ext_id = 1; - if(bsp_apicid == 0) { + if (bsp_apicid == 0) { /* bsp apic id is not changed */ sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET; } else @@ -1174,7 +1174,7 @@ static void cpu_bus_scan(device_t dev) /* Always use the devicetree node with lapic_id 0 for BSP. */ remap_bsp_lapic(cpu_bus); - for(i = 0; i < sysconf.nodes; i++) { + for (i = 0; i < sysconf.nodes; i++) { device_t cpu_dev; /* Find the cpu's pci device */ @@ -1185,7 +1185,7 @@ static void cpu_bus_scan(device_t dev) */ int local_j; device_t dev_f0; - for(local_j = 0; local_j <= 3; local_j++) { + for (local_j = 0; local_j <= 3; local_j++) { cpu_dev = pci_probe_dev(NULL, dev_mc->bus, PCI_DEVFN(0x18 + i, local_j)); } @@ -1193,7 +1193,7 @@ static void cpu_bus_scan(device_t dev) * otherwise the device under it will not be scanned */ dev_f0 = dev_find_slot(0, PCI_DEVFN(0x18+i,0)); - if(dev_f0) { + if (dev_f0) { add_more_links(dev_f0, 3); } } @@ -1205,12 +1205,12 @@ static void cpu_bus_scan(device_t dev) j = (j >> 12) & 3; // dev is func 3 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cpu_dev), j); - if(nb_cfg_54) { + if (nb_cfg_54) { // For e0 single core if nb_cfg_54 is set, apicid will be 0, 2, 4.... 
// ----> you can mixed single core e0 and dual core e0 at any sequence // That is the typical case - if(j == 0 ){ + if (j == 0 ){ #if !CONFIG_K8_REV_F_SUPPORT e0_later_single_core = is_e0_later_in_bsp(i); // single core #else @@ -1219,13 +1219,13 @@ static void cpu_bus_scan(device_t dev) } else { e0_later_single_core = 0; } - if(e0_later_single_core) { + if (e0_later_single_core) { printk(BIOS_DEBUG, "\tFound Rev E or Rev F later single core\n"); j=1; } - if(siblings > j ) { + if (siblings > j ) { } else { siblings = j; @@ -1236,7 +1236,7 @@ static void cpu_bus_scan(device_t dev) } u32 jj; - if(e0_later_single_core || disable_siblings) { + if (e0_later_single_core || disable_siblings) { jj = 0; } else { @@ -1245,7 +1245,7 @@ static void cpu_bus_scan(device_t dev) for (j = 0; j <=jj; j++ ) { u32 apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8); - if(sysconf.enabled_apic_ext_id) { + if (sysconf.enabled_apic_ext_id) { if (apic_id != 0 || sysconf.lift_bsp_apicid) { apic_id += sysconf.apicid_offset; } diff --git a/src/northbridge/amd/amdk8/raminit.c b/src/northbridge/amd/amdk8/raminit.c index f502287d22..a9516d8d28 100644 --- a/src/northbridge/amd/amdk8/raminit.c +++ b/src/northbridge/amd/amdk8/raminit.c @@ -2445,7 +2445,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl) if ((loops & 1023) == 0) { printk(BIOS_DEBUG, "."); } - } while(((dcl & whatWait) != 0) && (loops < TIMEOUT_LOOPS)); + } while (((dcl & whatWait) != 0) && (loops < TIMEOUT_LOOPS)); if (loops >= TIMEOUT_LOOPS) { printk(BIOS_DEBUG, " failed\n"); continue; @@ -2460,7 +2460,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl) #endif do { dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW); - } while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) || + } while (((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) || ((dcl & DCL_SRS))); } diff --git a/src/northbridge/amd/amdk8/raminit_f.c b/src/northbridge/amd/amdk8/raminit_f.c index 8ed0335703..765fea1f15 100644 --- a/src/northbridge/amd/amdk8/raminit_f.c +++ b/src/northbridge/amd/amdk8/raminit_f.c @@ -1647,7 +1647,7 @@ static uint8_t get_exact_divisor(int i, uint8_t divisor) /* Check for FID control support */ struct cpuid_result cpuid1; cpuid1 = cpuid(0x80000007); - if( cpuid1.edx & 0x02 ) { + if ( cpuid1.edx & 0x02 ) { /* Use current FID */ unsigned fid_cur; msr = rdmsr(0xc0010042); @@ -3118,7 +3118,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, if ((loops & 1023) == 0) { printk(BIOS_DEBUG, "."); } - } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS)); + } while (((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS)); if (loops >= TIMEOUT_LOOPS) { printk(BIOS_DEBUG, " failed\n"); continue; @@ -3127,7 +3127,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl, /* Wait until it is safe to touch memory */ do { dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC); - } while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ ); + } while (((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ ); #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1 if (cpu_f0_f1[i]) { diff --git a/src/northbridge/amd/amdk8/raminit_f_dqs.c b/src/northbridge/amd/amdk8/raminit_f_dqs.c index 0b777d18ac..567a8b6343 100644 --- a/src/northbridge/amd/amdk8/raminit_f_dqs.c +++ b/src/northbridge/amd/amdk8/raminit_f_dqs.c @@ -23,7 +23,7 @@ static inline void print_debug_dqs(const char *str, unsigned val, 
unsigned level) { #if DQS_TRAIN_DEBUG > 0 - if(DQS_TRAIN_DEBUG > level) { + if (DQS_TRAIN_DEBUG > level) { printk(BIOS_DEBUG, "%s%x\n", str, val); } #endif @@ -32,7 +32,7 @@ static inline void print_debug_dqs(const char *str, unsigned val, unsigned level static inline void print_debug_dqs_pair(const char *str, unsigned val, const char *str2, unsigned val2, unsigned level) { #if DQS_TRAIN_DEBUG > 0 - if(DQS_TRAIN_DEBUG > level) { + if (DQS_TRAIN_DEBUG > level) { printk(BIOS_DEBUG, "%s%08x%s%08x\n", str, val, str2, val2); } #endif @@ -41,7 +41,7 @@ static inline void print_debug_dqs_pair(const char *str, unsigned val, const cha static inline void print_debug_dqs_tsc(const char *str, unsigned i, unsigned val, unsigned val2, unsigned level) { #if DQS_TRAIN_DEBUG > 0 - if(DQS_TRAIN_DEBUG > level) { + if (DQS_TRAIN_DEBUG > level) { printk(BIOS_DEBUG, "%s[%02x]=%08x%08x\n", str, i, val, val2); } #endif @@ -59,7 +59,7 @@ static void fill_mem_cs_sysinfo(unsigned nodeid, const struct mem_controller *ct int i; sysinfo->mem_base[nodeid] = pci_read_config32(ctrl->f1, 0x40 + (nodeid<<3)); - for(i=0;i<8; i++) { + for (i=0;i<8; i++) { sysinfo->cs_base[nodeid*8+i] = pci_read_config32(ctrl->f2, 0x40 + (i<<2)); } @@ -87,10 +87,10 @@ static unsigned Get_MCTSysAddr(const struct mem_controller *ctrl, unsigned cs_i dword += mem_base; #if CONFIG_HW_MEM_HOLE_SIZEK != 0 hole_reg = sysinfo->hole_reg[nodeid]; - if(hole_reg & 1) { + if (hole_reg & 1) { unsigned hole_startk; hole_startk = (hole_reg & (0xff<<24)) >> 10; - if( (dword >= (hole_startk<<2)) && (dword < ((4*1024*1024)<<2))) { + if ( (dword >= (hole_startk<<2)) && (dword < ((4*1024*1024)<<2))) { dword += ((4*1024*1024 - hole_startk)<<2); } } @@ -196,7 +196,7 @@ static void WriteLNTestPattern(unsigned addr_lo, uint8_t *buf_a, unsigned line_n static void Write1LTestPattern(unsigned addr, unsigned p, uint8_t *buf_a, uint8_t *buf_b) { uint8_t *buf; - if(p==1) { buf = buf_b; } + if (p==1) { buf = buf_b; } else { buf = buf_a; } set_FSBASE (addr>>24); @@ -241,8 +241,8 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned uint32_t value_test; unsigned result = DQS_FAIL; - if(Pass == DQS_FIRST_PASS) { - if(pattern==1) { + if (Pass == DQS_FIRST_PASS) { + if (pattern==1) { test_buf = (uint32_t *)TestPattern1; } else { @@ -257,7 +257,7 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned addr_lo = addr<<8; - if(is_Width128 && (channel == 1)) { + if (is_Width128 && (channel == 1)) { addr_lo += 8; //second channel test_buf += 2; } @@ -273,7 +273,7 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned print_debug_dqs_pair("\t\t\t\t\t\tQW0.lo : test_buf= ", (unsigned)test_buf, " value = ", value_test, 4); print_debug_dqs_pair("\t\t\t\t\t\tQW0.lo : addr_lo = ", addr_lo, " value = ", value, 4); - if(value == value_test) { + if (value == value_test) { addr_lo += 4; test_buf++; __asm__ volatile ( @@ -284,13 +284,13 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned print_debug_dqs_pair("\t\t\t\t\t\tQW0.hi : test_buf= ", (unsigned)test_buf, " value = ", value_test, 4); print_debug_dqs_pair("\t\t\t\t\t\tQW0.hi : addr_lo = ", addr_lo, " value = ", value, 4); - if(value == value_test){ + if (value == value_test){ result = DQS_PASS; } } - if(Pass == DQS_SECOND_PASS) { // second pass need to be inverted - if(result==DQS_PASS) { + if (Pass == DQS_SECOND_PASS) { // second pass need to be inverted + if (result==DQS_PASS) { result = DQS_FAIL; } else { @@ -408,7 +408,7 @@ 
static uint16_t get_exact_T1000(unsigned i) /* Check for FID control support */ struct cpuid_result cpuid1; cpuid1 = cpuid(0x80000007); - if( cpuid1.edx & 0x02 ) { + if ( cpuid1.edx & 0x02 ) { /* Use current FID */ unsigned fid_cur; msr = rdmsr(0xc0010042); @@ -424,7 +424,7 @@ static uint16_t get_exact_T1000(unsigned i) index = fid_start>>25; } - if(index>12) return T1000_a[i]; + if (index>12) return T1000_a[i]; return TT_a[index * 4+i]; @@ -436,14 +436,14 @@ static void InitDQSPos4RcvrEn(const struct mem_controller *ctrl) uint32_t dword; dword = 0x00000000; - for(i=1; i<=3; i++) { + for (i=1; i<=3; i++) { /* Program the DQS Write Timing Control Registers (Function 2:Offset 0x9c, index 0x01-0x03, 0x21-0x23) to 0x00 for all bytes */ pci_write_config32_index_wait(ctrl->f2, 0x98, i, dword); pci_write_config32_index_wait(ctrl->f2, 0x98, i+0x20, dword); } dword = 0x2f2f2f2f; - for(i=5; i<=7; i++) { + for (i=5; i<=7; i++) { /* Program the DQS Write Timing Control Registers (Function 2:Offset 0x9c, index 0x05-0x07, 0x25-0x27) to 0x2f for all bytes */ pci_write_config32_index_wait(ctrl->f2, 0x98, i, dword); pci_write_config32_index_wait(ctrl->f2, 0x98, i+0x20, dword); @@ -512,7 +512,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st unsigned cpu_f0_f1 = 0; #endif - if(Pass == DQS_FIRST_PASS) { + if (Pass == DQS_FIRST_PASS) { InitDQSPos4RcvrEn(ctrl); } @@ -529,10 +529,10 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dword); - if(Pass == DQS_FIRST_PASS) { + if (Pass == DQS_FIRST_PASS) { #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1 cpu_f0_f1 = is_cpu_pre_f2_in_bsp(ctrl->node_id); - if(!cpu_f0_f1) + if (!cpu_f0_f1) #endif { #if 1 @@ -553,14 +553,14 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st // SetupRcvrPattern buf_a = (uint8_t *)(((uint32_t)(&pattern_buf_x[0]) + 0x10) & (0xfffffff0)); buf_b = buf_a + 128; //?? 
- if(Pass==DQS_FIRST_PASS) { - for(i=0;i<16;i++) { + if (Pass==DQS_FIRST_PASS) { + for (i=0;i<16;i++) { *((uint32_t *)(buf_a + i*4)) = TestPattern0[i]; *((uint32_t *)(buf_b + i*4)) = TestPattern1[i]; } } else { - for(i=0;i<16;i++) { + for (i=0;i<16;i++) { *((uint32_t *)(buf_a + i*4)) = TestPattern2[i]; *((uint32_t *)(buf_b + i*4)) = TestPattern2[i]; } @@ -586,21 +586,21 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st /* for each rank */ /* there are four receiver pairs, loosely associated with CS */ - for( receiver = 0; (receiver < 8) && (!Errors); receiver+=2) + for ( receiver = 0; (receiver < 8) && (!Errors); receiver+=2) { unsigned index=(receiver>>1) * 3 + 0x10; print_debug_dqs("\t\tTrainRcvEn52: index ", index, 2); - if(is_Width128) { - if(channel) { + if (is_Width128) { + if (channel) { dword = pci_read_config32_index_wait(ctrl->f2, 0x98, index); CurrRcvrCHADelay= dword & 0xff; } } else { - if(channel) { + if (channel) { index += 0x20; } } @@ -608,7 +608,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st LastTest = DQS_FAIL; RcvrEnDlyRmin = 0xaf; - if(!RcvrRankEnabled(ctrl, channel, receiver, is_Width128, sysinfo)) continue; + if (!RcvrRankEnabled(ctrl, channel, receiver, is_Width128, sysinfo)) continue; /* for each DQS receiver enable setting */ @@ -616,7 +616,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st TestAddr0B = TestAddr0 + (1<<(20+2-8)); // 4MB - if(RcvrRankEnabled(ctrl, channel, receiver+1, is_Width128, sysinfo)) { + if (RcvrRankEnabled(ctrl, channel, receiver+1, is_Width128, sysinfo)) { TestAddr1 = Get_RcvrSysAddr(ctrl, channel, receiver+1, sysinfo); TestAddr1B = TestAddr1 + (1<<(20+2-8)); //4MB two_ranks = 1; @@ -630,12 +630,12 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st Write1LTestPattern(TestAddr0, 0, buf_a, buf_b); // rank0 of dimm, test p0 Write1LTestPattern(TestAddr0B, 1, buf_a, buf_b); //rank0 of dimm, test p1 - if(two_ranks == 1) { + if (two_ranks == 1) { Write1LTestPattern(TestAddr1, 0, buf_a, buf_b); //rank 1 of dimm Write1LTestPattern(TestAddr1B, 1, buf_a, buf_b);//rank 1 of dimm } - if(Pass == DQS_FIRST_PASS) { + if (Pass == DQS_FIRST_PASS) { RcvrEnDly = 0; } else { RcvrEnDly = dqs_rcvr_dly_a[channel * 8 + receiver]; @@ -644,7 +644,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st while ( RcvrEnDly < 0xaf) { // Sweep Delay value here print_debug_dqs("\t\t\tTrainRcvEn541: RcvrEnDly ", RcvrEnDly, 3); - if(RcvrEnDly & 1) { + if (RcvrEnDly & 1) { /* Odd steps get another pattern such that even and odd steps alternate. 
The pointers to the patterns will be swapped @@ -663,7 +663,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st pci_write_config32_index_wait(ctrl->f2, 0x98, index, RcvrEnDly); /* FIXME: 64bit MUX */ - if(is_Width128) { + if (is_Width128) { /* Program current Receiver enable delay channel b */ pci_write_config32_index_wait(ctrl->f2, 0x98, index+ 0x20, RcvrEnDly); } @@ -685,7 +685,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st print_debug_dqs("\t\t\tTrainRcvEn542: Test0 ", Test0, 3); - if(Test0 == DQS_PASS) { + if (Test0 == DQS_PASS) { Read1LTestPattern(TestAddr0B); Test1 = CompareTestPatternQW0(channel, TestAddr0B, PatternB, TestPattern0, TestPattern1, TestPattern2, Pass, is_Width128); @@ -695,20 +695,20 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st print_debug_dqs("\t\t\tTrainRcvEn543: Test1 ", Test1, 3); - if(Test1 == DQS_PASS) { - if(two_ranks) { + if (Test1 == DQS_PASS) { + if (two_ranks) { Read1LTestPattern(TestAddr1); Test0 = CompareTestPatternQW0(channel, TestAddr1, PatternA, TestPattern0, TestPattern1, TestPattern2, Pass, is_Width128); proc_IOCLFLUSH(TestAddr1); ResetDCTWrPtr(ctrl); - if(Test0 == DQS_PASS) { + if (Test0 == DQS_PASS) { Read1LTestPattern(TestAddr1B); Test1 = CompareTestPatternQW0(channel, TestAddr1B, PatternB, TestPattern0, TestPattern1, TestPattern2, Pass, is_Width128); proc_IOCLFLUSH(TestAddr1B); ResetDCTWrPtr(ctrl); - if(Test1 == DQS_PASS) { + if (Test1 == DQS_PASS) { CurrTest = DQS_PASS; } } @@ -722,8 +722,8 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st print_debug_dqs("\t\t\tTrainRcvEn55: RcvrEnDly ", RcvrEnDly, 3); - if(CurrTest == DQS_PASS) { - if(LastTest == DQS_FAIL) { + if (CurrTest == DQS_PASS) { + if (LastTest == DQS_FAIL) { RcvrEnDlyRmin = RcvrEnDly; break; } @@ -749,25 +749,25 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDly ", RcvrEnDly, 2); - if(RcvrEnDlyRmin == 0xaf) { + if (RcvrEnDlyRmin == 0xaf) { //no passing window Errors |= SB_NORCVREN; } - if(Pass == DQS_FIRST_PASS) { + if (Pass == DQS_FIRST_PASS) { // We need a better value for DQSPos training RcvrEnDly = RcvrEnDlyRmin /* + RCVREN_MARGIN * T1000/64/50 */; } else { RcvrEnDly = RcvrEnDlyRmin; } - if(RcvrEnDly > 0xae) { + if (RcvrEnDly > 0xae) { //passing window too narrow, too far delayed Errors |= SB_SmallRCVR; RcvrEnDly = 0xae; } - if(Pass == DQS_SECOND_PASS) { //second pass must average vales + if (Pass == DQS_SECOND_PASS) { //second pass must average vales RcvrEnDly += dqs_rcvr_dly_a[channel * 8 + receiver] /* - (RCVREN_MARGIN * T1000/64/50)*/; RcvrEnDly >>= 1; } @@ -777,18 +777,18 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st //Set final RcvrEnDly for this DIMM and Channel pci_write_config32_index_wait(ctrl->f2, 0x98, index, RcvrEnDly); - if(is_Width128) { + if (is_Width128) { pci_write_config32_index_wait(ctrl->f2, 0x98, index+0x20, RcvrEnDly); // channel B - if(channel) { + if (channel) { pci_write_config32_index_wait(ctrl->f2, 0x98, index, CurrRcvrCHADelay); - if(RcvrEnDly > CurrRcvrCHADelay) { + if (RcvrEnDly > CurrRcvrCHADelay) { dword = RcvrEnDly - CurrRcvrCHADelay; } else { dword = CurrRcvrCHADelay - RcvrEnDly; } dword *= 50; - if(dword > T1000) { + if (dword > T1000) { Errors |= SB_CHA2BRCVREN; } } @@ -796,7 +796,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st 
print_debug_dqs("\t\tTrainRcvEn63: RcvrEnDly ", RcvrEnDly, 2); - if(RcvrEnDly > CTLRMaxDelay) { + if (RcvrEnDly > CTLRMaxDelay) { CTLRMaxDelay = RcvrEnDly; } @@ -817,9 +817,9 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st dword |= ecc_bit; pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dword); - if(Pass == DQS_FIRST_PASS) { + if (Pass == DQS_FIRST_PASS) { #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1 - if(!cpu_f0_f1) + if (!cpu_f0_f1) #endif { dword = pci_read_config32(ctrl->f2, DRAM_CTRL); @@ -858,7 +858,7 @@ static void SetDQSDelayCSR(const struct mem_controller *ctrl, unsigned channel, index = (bytelane>>2) + 1 + channel * 0x20 + (direction << 2); shift = bytelane; - while(shift>3) { + while (shift>3) { shift-=4; } shift <<= 3; // 8 bit @@ -878,13 +878,13 @@ static void SetDQSDelayAllCSR(const struct mem_controller *ctrl, unsigned channe dword = 0; dqs_delay &= 0xff; - for(i=0;i<4;i++) { + for (i=0;i<4;i++) { dword |= dqs_delay<<(i*8); } index = 1 + channel * 0x20 + direction * 4; - for(i=0; i<2; i++) { + for (i=0; i<2; i++) { pci_write_config32_index_wait(ctrl->f2, 0x98, index + i, dword); } @@ -894,7 +894,7 @@ static unsigned MiddleDQS(unsigned min_d, unsigned max_d) { unsigned size_d; size_d = max_d-min_d; - if(size_d & 1) { //need round up + if (size_d & 1) { //need round up min_d++; } return ( min_d + (size_d>>1)); @@ -968,7 +968,7 @@ static void ReadL9TestPattern(unsigned addr_lo) static void ReadDQSTestPattern(unsigned addr_lo, unsigned pattern) { - if(pattern == 0) { + if (pattern == 0) { ReadL9TestPattern(addr_lo); } else { @@ -1028,7 +1028,7 @@ static __attribute__((noinline)) void FlushDQSTestPattern_L18(unsigned addr_lo) static void FlushDQSTestPattern(unsigned addr_lo, unsigned pattern ) { - if(pattern == 0){ + if (pattern == 0){ FlushDQSTestPattern_L9(addr_lo); } else { @@ -1049,13 +1049,13 @@ static unsigned CompareDQSTestPattern(unsigned channel, unsigned addr_lo, unsign test_buf = (uint32_t *)buf_a; - if(pattern && channel) { + if (pattern && channel) { addr_lo += 8; //second channel test_buf+= 2; } bytelane = 0; - for(i=0;i<9*64/4;i++) { + for (i=0;i<9*64/4;i++) { __asm__ volatile ( "movl %%fs:(%1), %0\n\t" :"=b"(value): "a" (addr_lo) @@ -1065,8 +1065,8 @@ static unsigned CompareDQSTestPattern(unsigned channel, unsigned addr_lo, unsign print_debug_dqs_pair("\t\t\t\t\t\ttest_buf= ", (unsigned)test_buf, " value = ", value_test, 7); print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ",addr_lo, " value = ", value, 7); - for(j=0;j<4*8;j+=8) { - if(((value>>j)&0xff) != ((value_test>>j)& 0xff)) { + for (j=0;j<4*8;j+=8) { + if (((value>>j)&0xff) != ((value_test>>j)& 0xff)) { bitmap &= ~(1<<bytelane); } @@ -1075,8 +1075,8 @@ static unsigned CompareDQSTestPattern(unsigned channel, unsigned addr_lo, unsign } print_debug_dqs("\t\t\t\t\t\tbitmap = ", bitmap, 7); - if(bytelane == 0) { - if(pattern == 1) { //dual channel + if (bytelane == 0) { + if (pattern == 1) { //dual channel addr_lo += 8; //skip over other channel's data test_buf += 2; } @@ -1115,14 +1115,14 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel, printk(BIOS_DEBUG, "TrainDQSPos: MutualCSPassW[48] :%p\n", MutualCSPassW); - for(DQSDelay=0; DQSDelay<48; DQSDelay++) { + for (DQSDelay=0; DQSDelay<48; DQSDelay++) { MutualCSPassW[DQSDelay] = 0xff; // Bitmapped status per delay setting, 0xff=All positions passing (1= PASS) } - for(ChipSel = 0; ChipSel < 8; ChipSel++) { //logical register chipselects 0..7 + for (ChipSel = 0; ChipSel < 8; ChipSel++) { //logical 
register chipselects 0..7 print_debug_dqs("\t\t\t\tTrainDQSPos: 11 ChipSel ", ChipSel, 4); //FIXME: process 64MUXedMode - if(!ChipSelPresent(ctrl, ChipSel, sysinfo)) continue; + if (!ChipSelPresent(ctrl, ChipSel, sysinfo)) continue; BanksPresent = 1; TestAddr = Get_MCTSysAddr(ctrl, ChipSel, sysinfo); @@ -1132,17 +1132,17 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel, //set fs and use fs prefix to access the mem set_FSBASE(TestAddr>>24); - if(Direction == DQS_READDIR) { + if (Direction == DQS_READDIR) { print_debug_dqs("\t\t\t\tTrainDQSPos: 13 for read so write at first", 0, 4); WriteDQSTestPattern(TestAddr<<8, Pattern, buf_a); } - for(DQSDelay = 0; DQSDelay < 48; DQSDelay++ ){ + for (DQSDelay = 0; DQSDelay < 48; DQSDelay++ ){ print_debug_dqs("\t\t\t\t\tTrainDQSPos: 141 DQSDelay ", DQSDelay, 5); - if(MutualCSPassW[DQSDelay] == 0) continue; //skip current delay value if other chipselects have failed all 8 bytelanes + if (MutualCSPassW[DQSDelay] == 0) continue; //skip current delay value if other chipselects have failed all 8 bytelanes SetDQSDelayAllCSR(ctrl, channel, Direction, DQSDelay); print_debug_dqs("\t\t\t\t\tTrainDQSPos: 142 MutualCSPassW ", MutualCSPassW[DQSDelay], 5); - if(Direction == DQS_WRITEDIR) { + if (Direction == DQS_WRITEDIR) { print_debug_dqs("\t\t\t\t\tTrainDQSPos: 143 for write", 0, 5); WriteDQSTestPattern(TestAddr<<8, Pattern, buf_a); } @@ -1157,25 +1157,25 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel, } } - if(BanksPresent) - for(ByteLane = 0; ByteLane < 8; ByteLane++) { + if (BanksPresent) + for (ByteLane = 0; ByteLane < 8; ByteLane++) { print_debug_dqs("\t\t\t\tTrainDQSPos: 31 ByteLane ",ByteLane, 4); LastTest = DQS_FAIL; RnkDlySeqPassMax = 0; RnkDlyFilterMax = 0; RnkDlyFilterMin = 0; - for(DQSDelay=0; DQSDelay<48; DQSDelay++) { - if(MutualCSPassW[DQSDelay] & (1<<ByteLane)) { + for (DQSDelay=0; DQSDelay<48; DQSDelay++) { + if (MutualCSPassW[DQSDelay] & (1<<ByteLane)) { print_debug_dqs("\t\t\t\t\tTrainDQSPos: 321 DQSDelay ", DQSDelay, 5); print_debug_dqs("\t\t\t\t\tTrainDQSPos: 322 MutualCSPassW ", MutualCSPassW[DQSDelay], 5); RnkDlySeqPassMax = DQSDelay; - if(LastTest == DQS_FAIL) { + if (LastTest == DQS_FAIL) { RnkDlySeqPassMin = DQSDelay; //start sequential run } - if((RnkDlySeqPassMax - RnkDlySeqPassMin)>(RnkDlyFilterMax-RnkDlyFilterMin)){ + if ((RnkDlySeqPassMax - RnkDlySeqPassMin)>(RnkDlyFilterMax-RnkDlyFilterMin)){ RnkDlyFilterMin = RnkDlySeqPassMin; RnkDlyFilterMax = RnkDlySeqPassMax; } @@ -1187,13 +1187,13 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel, } print_debug_dqs("\t\t\t\tTrainDQSPos: 33 RnkDlySeqPassMax ", RnkDlySeqPassMax, 4); - if(RnkDlySeqPassMax == 0) { + if (RnkDlySeqPassMax == 0) { Errors |= SB_NODQSPOS; // no passing window } else { print_debug_dqs("\t\t\t\tTrainDQSPos: 34 RnkDlyFilterMax ", RnkDlyFilterMax, 4); print_debug_dqs("\t\t\t\tTrainDQSPos: 34 RnkDlyFilterMin ", RnkDlyFilterMin, 4); - if((RnkDlyFilterMax - RnkDlyFilterMin)< MIN_DQS_WNDW){ + if ((RnkDlyFilterMax - RnkDlyFilterMin)< MIN_DQS_WNDW){ Errors |= SB_SMALLDQS; } else { @@ -1370,15 +1370,15 @@ static unsigned TrainDQSRdWrPos(const struct mem_controller *ctrl, struct sys_in //SetupDqsPattern buf_a = (uint8_t *)(((uint32_t)(&pattern_buf_x[0]) + 0x10) & (~0xf)); - if(is_Width128){ + if (is_Width128){ pattern = 1; - for(i=0;i<16*18;i++) { + for (i=0;i<16*18;i++) { *((uint32_t *)(buf_a + i*4)) = TestPatternJD1b[i]; } } else { pattern = 0; - for(i=0; i<16*9;i++) { + for (i=0; 
i<16*9;i++) { *((uint32_t *)(buf_a + i*4)) = TestPatternJD1a[i]; } @@ -1396,27 +1396,27 @@ static unsigned TrainDQSRdWrPos(const struct mem_controller *ctrl, struct sys_in channel = 1; } - while( (channel<2) && (!Errors)) { + while ( (channel<2) && (!Errors)) { print_debug_dqs("\tTrainDQSRdWrPos: 1 channel ",channel, 1); - for(DQSWrDelay = 0; DQSWrDelay < 48; DQSWrDelay++) { + for (DQSWrDelay = 0; DQSWrDelay < 48; DQSWrDelay++) { unsigned err; SetDQSDelayAllCSR(ctrl, channel, DQS_WRITEDIR, DQSWrDelay); print_debug_dqs("\t\tTrainDQSRdWrPos: 21 DQSWrDelay ", DQSWrDelay, 2); err= TrainReadDQS(ctrl, channel, pattern, buf_a, dqs_delay_a, sysinfo); print_debug_dqs("\t\tTrainDQSRdWrPos: 22 err ",err, 2); - if(err == 0) break; + if (err == 0) break; Errors |= err; } print_debug_dqs("\tTrainDQSRdWrPos: 3 DQSWrDelay ", DQSWrDelay, 1); - if(DQSWrDelay < 48) { + if (DQSWrDelay < 48) { Errors = TrainWriteDQS(ctrl, channel, pattern, buf_a, dqs_delay_a, sysinfo); print_debug_dqs("\tTrainDQSRdWrPos: 4 Errors ", Errors, 1); } channel++; - if(!is_Width128){ + if (!is_Width128){ //FIXME: 64MuxMode?? channel++; // skip channel if 64-bit mode } @@ -1457,7 +1457,7 @@ static unsigned CalcEccDQSPos(unsigned channel,unsigned ByteLane0, unsigned Byte DQSDelay0 = get_dqs_delay(channel, ByteLane0, Direction, dqs_delay_a); DQSDelay1 = get_dqs_delay(channel, ByteLane1, Direction, dqs_delay_a); - if(DQSDelay0>DQSDelay1) { + if (DQSDelay0>DQSDelay1) { DQSDelay = DQSDelay0 - DQSDelay1; InterFactor = 0xff - InterFactor; } @@ -1469,7 +1469,7 @@ static unsigned CalcEccDQSPos(unsigned channel,unsigned ByteLane0, unsigned Byte DQSDelay >>= 8; // /255 - if(DQSDelay0>DQSDelay1) { + if (DQSDelay0>DQSDelay1) { DQSDelay += DQSDelay1; } else { @@ -1494,8 +1494,8 @@ static void SetEccDQSRdWrPos(const struct mem_controller *ctrl, struct sys_info ByteLane = 8; - for(channel = 0; channel < 2; channel++) { - for(i=0;i<2;i++) { + for (channel = 0; channel < 2; channel++) { + for (i=0;i<2;i++) { Direction = direction[i]; lane0 = 4; lane1 = 5; ratio = 0; dqs_delay = CalcEccDQSPos(channel, lane0, lane1, ratio, Direction, dqs_delay_a); @@ -1509,7 +1509,7 @@ static void SetEccDQSRdWrPos(const struct mem_controller *ctrl, struct sys_info static unsigned train_DqsRcvrEn(const struct mem_controller *ctrl, unsigned Pass, struct sys_info *sysinfo) { print_debug_dqs("\ntrain_DqsRcvrEn: begin ctrl ", ctrl->node_id, 0); - if(TrainRcvrEn(ctrl, Pass, sysinfo)) { + if (TrainRcvrEn(ctrl, Pass, sysinfo)) { return 1; } print_debug_dqs("\ntrain_DqsRcvrEn: end ctrl ", ctrl->node_id, 0); @@ -1519,7 +1519,7 @@ static unsigned train_DqsRcvrEn(const struct mem_controller *ctrl, unsigned Pass static unsigned train_DqsPos(const struct mem_controller *ctrl, struct sys_info *sysinfo) { print_debug_dqs("\ntrain_DqsPos: begin ctrl ", ctrl->node_id, 0); - if(TrainDQSRdWrPos(ctrl, sysinfo) != 0) { + if (TrainDQSRdWrPos(ctrl, sysinfo) != 0) { printk(BIOS_ERR, "\nDQS Training Rd Wr failed ctrl%02x\n", ctrl->node_id); return 1; } @@ -1540,18 +1540,18 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl print_debug_addr("dqs_timing: tsc1[8] :", tsc1); - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[i]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; uint32_t dword; cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i); - if(!cpu_f0_f1[i]) continue; + if (!cpu_f0_f1[i]) 
continue; dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL); dword &= ~DC_DqsRcvEnTrain; @@ -1567,7 +1567,7 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl print_debug_dqs_tsc("begin: tsc1", i, tsc1[i].hi, tsc1[i].lo, 2); dword = tsc1[i].lo + tsc0[i].lo; - if((dword<tsc1[i].lo) || (dword<tsc0[i].lo)) { + if ((dword<tsc1[i].lo) || (dword<tsc0[i].lo)) { tsc1[i].hi++; } tsc1[i].lo = dword; @@ -1577,14 +1577,14 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl } - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[i]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; - if(!cpu_f0_f1[i]) continue; + if (!cpu_f0_f1[i]) continue; tsc_t tsc; @@ -1672,7 +1672,7 @@ static unsigned int range_to_mtrr(unsigned int reg, if (!range_sizek || (reg >= 8)) { return reg; } - while(range_sizek) { + while (range_sizek) { unsigned long max_align, align; unsigned long sizek; /* Compute the maximum size I can make a range */ @@ -1735,7 +1735,7 @@ static void setup_mtrr_dqs(unsigned tom_k, unsigned tom2_k) range_to_mtrr(2, 0, tom_k,4*1024*1024, MTRR_TYPE_WRBACK, 40); //[4G, TOM2) - if(tom2_k) { + if (tom2_k) { //enable tom2 and type msr = rdmsr(SYSCFG_MSR); msr.lo |= (1<<21) | (1<<22); //MtrrTom2En and Tom2ForceMemTypeWB @@ -1761,12 +1761,12 @@ static void clear_mtrr_dqs(unsigned tom2_k) wrmsr(0x258, msr); //[1M, TOM) - for(i=0x204;i<0x210;i++) { + for (i=0x204;i<0x210;i++) { wrmsr(i, msr); } //[4G, TOM2) - if(tom2_k) { + if (tom2_k) { //enable tom2 and type msr = rdmsr(SYSCFG_MSR); msr.lo &= ~((1<<21) | (1<<22)); //MtrrTom2En and Tom2ForceMemTypeWB @@ -1794,8 +1794,8 @@ static unsigned get_htic_bit(unsigned i, unsigned bit) static void wait_till_sysinfo_in_ram(void) { - while(1) { - if(get_htic_bit(0, 9)) return; + while (1) { + if (get_htic_bit(0, 9)) return; } } #endif @@ -1909,26 +1909,26 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc //need to enable mtrr, so dqs training could access the test address setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k); - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[ i ]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; fill_mem_cs_sysinfo(i, ctrl+i, sysinfo); } tsc[0] = rdtsc(); - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[ i ]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; printk(BIOS_DEBUG, "DQS Training:RcvrEn:Pass1: %02x\n", i); - if(train_DqsRcvrEn(ctrl+i, 1, sysinfo)) goto out; + if (train_DqsRcvrEn(ctrl+i, 1, sysinfo)) goto out; printk(BIOS_DEBUG, " done\n"); } @@ -1938,28 +1938,28 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc #endif tsc[2] = rdtsc(); - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[i]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; printk(BIOS_DEBUG, "DQS Training:DQSPos: 
%02x\n", i); - if(train_DqsPos(ctrl+i, sysinfo)) goto out; + if (train_DqsPos(ctrl+i, sysinfo)) goto out; printk(BIOS_DEBUG, " done\n"); } tsc[3] = rdtsc(); - for(i = 0; i < controllers; i++) { + for (i = 0; i < controllers; i++) { if (!sysinfo->ctrl_present[i]) continue; /* Skip everything if I don't have any memory on this controller */ - if(sysinfo->meminfo[i].dimm_mask==0x00) continue; + if (sysinfo->meminfo[i].dimm_mask==0x00) continue; printk(BIOS_DEBUG, "DQS Training:RcvrEn:Pass2: %02x\n", i); - if(train_DqsRcvrEn(ctrl+i, 2, sysinfo)) goto out; + if (train_DqsRcvrEn(ctrl+i, 2, sysinfo)) goto out; printk(BIOS_DEBUG, " done\n"); sysinfo->mem_trained[i]=1; #if CONFIG_HAVE_ACPI_RESUME @@ -1972,7 +1972,7 @@ out: clear_mtrr_dqs(sysinfo->tom2_k); - for(i=0;i<5;i++) { + for (i=0;i<5;i++) { print_debug_dqs_tsc_x("DQS Training:tsc", i, tsc[i].hi, tsc[i].lo); } @@ -1992,7 +1992,7 @@ static void dqs_timing(int i, const struct mem_controller *ctrl, struct sys_info tsc_t tsc[4]; - if(sysinfo->mem_trained[i] != 0x80) return; + if (sysinfo->mem_trained[i] != 0x80) return; #if CONFIG_MEM_TRAIN_SEQ == 1 //need to enable mtrr, so dqs training could access the test address @@ -2001,39 +2001,39 @@ static void dqs_timing(int i, const struct mem_controller *ctrl, struct sys_info fill_mem_cs_sysinfo(i, ctrl, sysinfo); - if(v) { + if (v) { tsc[0] = rdtsc(); printk(BIOS_DEBUG, "set DQS timing:RcvrEn:Pass1: %02x\n", i); } - if(train_DqsRcvrEn(ctrl, 1, sysinfo)) { + if (train_DqsRcvrEn(ctrl, 1, sysinfo)) { sysinfo->mem_trained[i]=0x81; // goto out; } - if(v) { + if (v) { printk(BIOS_DEBUG, " done\n"); tsc[1] = rdtsc(); printk(BIOS_DEBUG, "set DQS timing:DQSPos: %02x\n", i); } - if(train_DqsPos(ctrl, sysinfo)) { + if (train_DqsPos(ctrl, sysinfo)) { sysinfo->mem_trained[i]=0x82; // goto out; } - if(v) { + if (v) { printk(BIOS_DEBUG, " done\n"); tsc[2] = rdtsc(); printk(BIOS_DEBUG, "set DQS timing:RcvrEn:Pass2: %02x\n", i); } - if(train_DqsRcvrEn(ctrl, 2, sysinfo)){ + if (train_DqsRcvrEn(ctrl, 2, sysinfo)){ sysinfo->mem_trained[i]=0x83; // goto out; } - if(v) { + if (v) { printk(BIOS_DEBUG, " done\n"); tsc[3] = rdtsc(); @@ -2044,13 +2044,13 @@ out: clear_mtrr_dqs(sysinfo->tom2_k); #endif - if(v) { - for(ii=0;ii<4;ii++) { + if (v) { + for (ii=0;ii<4;ii++) { print_debug_dqs_tsc_x("Total DQS Training : tsc ", ii, tsc[ii].hi, tsc[ii].lo); } } - if(sysinfo->mem_trained[i] == 0x80) { + if (sysinfo->mem_trained[i] == 0x80) { sysinfo->mem_trained[i]=1; } @@ -2069,7 +2069,7 @@ static void train_ram(unsigned nodeid, struct sys_info *sysinfo, struct sys_info static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sys_info *sysinfo, unsigned retcall) { - if(coreid) return; // only do it on core0 + if (coreid) return; // only do it on core0 struct sys_info *sysinfox; uintptr_t migrated_base = CONFIG_RAMTOP - car_data_size(); @@ -2077,7 +2077,7 @@ static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sy wait_till_sysinfo_in_ram(); // use pci to get it - if(sysinfox->mem_trained[nodeid] == 0x80) { + if (sysinfox->mem_trained[nodeid] == 0x80) { #if 0 sysinfo->tom_k = sysinfox->tom_k; sysinfo->tom2_k = sysinfox->tom2_k; diff --git a/src/northbridge/amd/amdk8/raminit_test.c b/src/northbridge/amd/amdk8/raminit_test.c index 87e281d645..597b689f7c 100644 --- a/src/northbridge/amd/amdk8/raminit_test.c +++ b/src/northbridge/amd/amdk8/raminit_test.c @@ -96,7 +96,7 @@ static void pci_write_config32(device_t dev, unsigned where, uint32_t value) #define PCI_DEV_INVALID (0xffffffffU) static 
device_t pci_locate_device(unsigned pci_id, device_t dev) { - for(; dev <= PCI_DEV(255, 31, 7); dev += PCI_DEV(0,0,1)) { + for (; dev <= PCI_DEV(255, 31, 7); dev += PCI_DEV(0,0,1)) { unsigned int id; id = pci_read_config32(dev, 0); if (id == pci_id) { @@ -131,7 +131,7 @@ unsigned long log2(unsigned long x) write(STDERR_FILENO, errmsg, sizeof(errmsg) - 1); hlt(); } - for(; i > x; i >>= 1, pow--) + for (; i > x; i >>= 1, pow--) ; return pow; @@ -410,7 +410,7 @@ done: static void test2(void) { int i; - for(i = 0; i < 0x48; i++) { + for (i = 0; i < 0x48; i++) { do_test2(i); } diff --git a/src/northbridge/amd/amdk8/reset_test.c b/src/northbridge/amd/amdk8/reset_test.c index 81da5920cb..cfc5dda718 100644 --- a/src/northbridge/amd/amdk8/reset_test.c +++ b/src/northbridge/amd/amdk8/reset_test.c @@ -57,7 +57,7 @@ static unsigned node_link_to_bus(unsigned node, unsigned link) { u8 reg; - for(reg = 0xE0; reg < 0xF0; reg += 0x04) { + for (reg = 0xE0; reg < 0xF0; reg += 0x04) { u32 config_map; config_map = pci_read_config32(PCI_DEV(0, 0x18, 1), reg); if ((config_map & 3) != 3) { diff --git a/src/northbridge/amd/amdk8/setup_resource_map.c b/src/northbridge/amd/amdk8/setup_resource_map.c index 230459ab7f..f8f2bbfe70 100644 --- a/src/northbridge/amd/amdk8/setup_resource_map.c +++ b/src/northbridge/amd/amdk8/setup_resource_map.c @@ -8,7 +8,7 @@ void setup_resource_map_offset(const unsigned int *register_values, int max, uns #if RES_DEBUG printk(BIOS_DEBUG, "setting up resource map offset....\n"); #endif - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { device_t dev; unsigned where; unsigned long reg = 0; @@ -46,7 +46,7 @@ static void setup_resource_map_x_offset(const unsigned int *register_values, int #if RES_DEBUG printk(BIOS_DEBUG, "setting up resource map ex offset....\n"); #endif - for(i = 0; i < max; i += 4) { + for (i = 0; i < max; i += 4) { #if RES_DEBUG printk(BIOS_DEBUG, "%04x: %02x %08x <- & %08x | %08x\n", i>>2, register_values[i], @@ -128,7 +128,7 @@ static void setup_io_resource_map(const unsigned int *register_values, int max) { int i; - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { unsigned where; unsigned long reg; @@ -163,7 +163,7 @@ static void setup_mem_resource_map(const unsigned int *register_values, int max) { int i; - for(i = 0; i < max; i += 3) { + for (i = 0; i < max; i += 3) { unsigned where; unsigned long reg; #if 0 diff --git a/src/northbridge/amd/amdk8/util.asl b/src/northbridge/amd/amdk8/util.asl index febfd2958e..6a9b69f267 100644 --- a/src/northbridge/amd/amdk8/util.asl +++ b/src/northbridge/amd/amdk8/util.asl @@ -54,7 +54,7 @@ Scope (\_SB) Method (GHCE, 1, NotSerialized) // check if the HC enabled { Store (DerefOf (Index (\_SB.PCI0.HCLK, Arg0)), Local1) - if(LEqual ( And(Local1, 0x01), 0x01)) { Return (0x0F) } + if (LEqual ( And(Local1, 0x01), 0x01)) { Return (0x0F) } Else { Return (0x00) } } diff --git a/src/northbridge/amd/amdmct/mct/mct_d.c b/src/northbridge/amd/amdmct/mct/mct_d.c index 0914065d2a..16e67dfdf2 100644 --- a/src/northbridge/amd/amdmct/mct/mct_d.c +++ b/src/northbridge/amd/amdmct/mct/mct_d.c @@ -925,7 +925,7 @@ static void ClearDCT_D(struct MCTStatStruc *pMCTstat, reg_end = 0xA4 + 0x100 * dct; } - while(reg < reg_end) { + while (reg < reg_end) { Set_NB32(dev, reg, val); reg += 4; } @@ -1694,7 +1694,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat, p = Tab_S1CLKDis; dword = 0; - while(dword < MAX_DIMMS_SUPPORTED) { + while (dword < MAX_DIMMS_SUPPORTED) { val = p[dword]; print_tx("DramTimingLo: val=", val); if 
(!(pDCTstat->DIMMValid & (1<<val))) @@ -3518,7 +3518,7 @@ static void InitPhyCompensation(struct MCTStatStruc *pMCTstat, static void WaitRoutine_D(u32 time) { - while(time) { + while (time) { _EXECFENCE; time--; } @@ -3877,7 +3877,7 @@ static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat, addr = HWCR; _RDMSR(addr, &lo, &hi); - if(lo & (1<<17)) { /* save the old value */ + if (lo & (1<<17)) { /* save the old value */ wrap32dis = 1; } lo |= (1<<17); /* HWCR.wrap32dis */ @@ -3906,7 +3906,7 @@ static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat, } } } - if(!wrap32dis) { + if (!wrap32dis) { addr = HWCR; _RDMSR(addr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ diff --git a/src/northbridge/amd/amdmct/mct/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h index 68b6bc2ea8..fd39b38170 100644 --- a/src/northbridge/amd/amdmct/mct/mct_d_gcc.h +++ b/src/northbridge/amd/amdmct/mct/mct_d_gcc.h @@ -61,8 +61,8 @@ static u32 bsr(u32 x) u8 i; u32 ret = 0; - for(i=31; i>0; i--) { - if(x & (1<<i)) { + for (i=31; i>0; i--) { + if (x & (1<<i)) { ret = i; break; } @@ -78,8 +78,8 @@ static u32 bsf(u32 x) u8 i; u32 ret = 32; - for(i=0; i<32; i++) { - if(x & (1<<i)) { + for (i=0; i<32; i++) { + if (x & (1<<i)) { ret = i; break; } @@ -343,7 +343,7 @@ static u32 stream_to_int(u8 const *p) val = 0; - for(i=3; i>=0; i--) { + for (i=3; i>=0; i--) { val <<= 8; valx = *(p+i); val |= valx; diff --git a/src/northbridge/amd/amdmct/mct/mctardk3.c b/src/northbridge/amd/amdmct/mct/mctardk3.c index dbc377cea3..327acbc250 100644 --- a/src/northbridge/amd/amdmct/mct/mctardk3.c +++ b/src/northbridge/amd/amdmct/mct/mctardk3.c @@ -31,7 +31,7 @@ void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat, &(pDCTstat->CH_ADDR_TMG[dct]), &(pDCTstat->CH_ODC_CTL[dct])); - if(pDCTstat->MAdimms[dct] == 1) + if (pDCTstat->MAdimms[dct] == 1) pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 75ohms */ else pDCTstat->CH_ODC_CTL[dct] |= 0x10000000; /* 150ohms */ @@ -176,7 +176,7 @@ static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload, *AddrTmgCTL = 0; *ODC_CTL = 0; - if(mctGet_NVbits(NV_MAX_DIMMS) == 8) { + if (mctGet_NVbits(NV_MAX_DIMMS) == 8) { /* 8 DIMM Table */ p = Table_ATC_ODC_8D_D; //FIXME Add Ax support @@ -188,8 +188,8 @@ static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload, while (*p != 0xFF) { if ((MAAdimms == *(p+10)) || (*(p+10 ) == 0xFE)) { - if((*p == Speed) || (*p == 0xFE)) { - if(MAAload <= *(p+1)) { + if ((*p == Speed) || (*p == 0xFE)) { + if (MAAload <= *(p+1)) { *AddrTmgCTL = stream_to_int((u8*)(p+2)); *ODC_CTL = stream_to_int((u8*)(p+6)); break; diff --git a/src/northbridge/amd/amdmct/mct/mctardk4.c b/src/northbridge/amd/amdmct/mct/mctardk4.c index 8899c55b17..cac2342097 100644 --- a/src/northbridge/amd/amdmct/mct/mctardk4.c +++ b/src/northbridge/amd/amdmct/mct/mctardk4.c @@ -33,7 +33,7 @@ void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat, // print_tx("1 CH_ODC_CTL: ", pDCTstat->CH_ODC_CTL[dct]); // print_tx("1 CH_ADDR_TMG: ", pDCTstat->CH_ADDR_TMG[dct]); - if(pDCTstat->MAdimms[dct] == 1) + if (pDCTstat->MAdimms[dct] == 1) pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 75ohms */ else pDCTstat->CH_ODC_CTL[dct] |= 0x10000000; /* 150ohms */ @@ -107,9 +107,9 @@ static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload, *CMDmode = 1; // FIXME: add Ax support - if(MAAdimms == 0) { + if (MAAdimms == 0) { *ODC_CTL = 0x00111222; - if(Speed == 3) + if (Speed == 3) *AddrTmgCTL = 0x00202220; else if (Speed == 2) *AddrTmgCTL = 0x002F2F00; @@ -121,21 +121,21 @@ static void 
Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload, *AddrTmgCTL = 0x002F2020; else *AddrTmgCTL = 0x002F2F2F; - } else if(MAAdimms == 1) { - if(Speed == 4) { + } else if (MAAdimms == 1) { + if (Speed == 4) { *CMDmode = 2; *AddrTmgCTL = 0x00202520; *ODC_CTL = 0x00113222; - } else if(Speed == 5) { + } else if (Speed == 5) { *CMDmode = 2; *AddrTmgCTL = 0x002F2020; *ODC_CTL = 0x00113222; } else { *CMDmode = 1; *ODC_CTL = 0x00111222; - if(Speed == 3) { + if (Speed == 3) { *AddrTmgCTL = 0x00202220; - } else if(Speed == 2) { + } else if (Speed == 2) { if (MAAload == 4) *AddrTmgCTL = 0x002B2F00; else if (MAAload == 16) @@ -144,9 +144,9 @@ static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload, *AddrTmgCTL = 0x002F2F00; else *AddrTmgCTL = 0x002F2F00; - } else if(Speed == 1) { + } else if (Speed == 1) { *AddrTmgCTL = 0x002F2F00; - } else if(Speed == 5) { + } else if (Speed == 5) { *AddrTmgCTL = 0x002F2020; } else { *AddrTmgCTL = 0x002F2F2F; @@ -156,8 +156,8 @@ static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload, *CMDmode = 2; p = Table_ATC_ODC_D_Bx; do { - if(Speed == *p) { - if(MAAload <= *(p+1)) { + if (Speed == *p) { + if (MAAload <= *(p+1)) { *AddrTmgCTL = stream_to_int(p+2); *ODC_CTL = stream_to_int(p+6); break; diff --git a/src/northbridge/amd/amdmct/mct/mctcsi_d.c b/src/northbridge/amd/amdmct/mct/mctcsi_d.c index 45b926b858..e8d26da45f 100644 --- a/src/northbridge/amd/amdmct/mct/mctcsi_d.c +++ b/src/northbridge/amd/amdmct/mct/mctcsi_d.c @@ -64,7 +64,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, val = Get_NB32(dev, reg); val >>= (ChipSel>>1)<<2; val &= 0x0f; - if(EnChipSels == 1) + if (EnChipSels == 1) BankEncd = val; else /*If number of Rows/Columns not equal, skip */ @@ -79,13 +79,13 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, } if (DoIntlv) { - if(!_CsIntCap) { + if (!_CsIntCap) { pDCTstat->ErrStatus |= 1<<SB_BkIntDis; DoIntlv = 0; } } - if(DoIntlv) { + if (DoIntlv) { val = Tab_int_D[BankEncd]; if (pDCTstat->Status & (1<<SB_128bitmode)) val++; @@ -113,7 +113,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, val |= val_hi; Set_NB32(dev, reg, val); - if(ChipSel & 1) + if (ChipSel & 1) continue; reg = 0x60 + ((ChipSel>>1)<<2) + reg_off; /*Dram CS Mask 0 */ diff --git a/src/northbridge/amd/amdmct/mct/mctdqs_d.c b/src/northbridge/amd/amdmct/mct/mctdqs_d.c index abc5838c54..67ff823cb8 100644 --- a/src/northbridge/amd/amdmct/mct/mctdqs_d.c +++ b/src/northbridge/amd/amdmct/mct/mctdqs_d.c @@ -1118,7 +1118,7 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat, /* New stuff */ val += ((1 << 21) >> 8); /* Add 2MB offset to avoid compat area */ if (val >= MCT_TRNG_KEEPOUT_START) { - while(val < MCT_TRNG_KEEPOUT_END) + while (val < MCT_TRNG_KEEPOUT_END) val += (1 << (15-8)); /* add 32K */ } diff --git a/src/northbridge/amd/amdmct/mct/mctecc_d.c b/src/northbridge/amd/amdmct/mct/mctecc_d.c index b2ac849b3d..5c1dc3a53c 100644 --- a/src/northbridge/amd/amdmct/mct/mctecc_d.c +++ b/src/northbridge/amd/amdmct/mct/mctecc_d.c @@ -121,12 +121,12 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) val = Get_NB32(dev, reg); /* WE/RE is checked */ - if((val & 3)==3) { /* Node has dram populated */ + if ((val & 3)==3) { /* Node has dram populated */ /* Negate 'all nodes/dimms ECC' flag if non ecc memory populated */ - if( pDCTstat->Status & (1<<SB_ECCDIMMs)) { + if ( pDCTstat->Status & (1<<SB_ECCDIMMs)) { LDramECC = isDramECCEn_D(pDCTstat); - if(pDCTstat->ErrCode != SC_RunningOK) { + if (pDCTstat->ErrCode != 
SC_RunningOK) { pDCTstat->Status &= ~(1 << SB_ECCDIMMs); if (!OB_NBECC) { pDCTstat->ErrStatus |= (1 << SB_DramECCDis); @@ -137,7 +137,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } else { AllECC = 0; } - if(LDramECC) { /* if ECC is enabled on this dram */ + if (LDramECC) { /* if ECC is enabled on this dram */ if (OB_NBECC) { mct_EnableDatIntlv_D(pMCTstat, pDCTstat); dev = pDCTstat->dev_nbmisc; @@ -160,7 +160,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } /* if Node present */ } - if(AllECC) + if (AllECC) pMCTstat->GStatus |= 1<<GSB_ECCDIMMs; else pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs); @@ -176,11 +176,11 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) val = Get_NB32(pDCTstat->dev_map, reg); curBase = val & 0xffff0000; /*WE/RE is checked because memory config may have been */ - if((val & 3)==3) { /* Node has dram populated */ + if ((val & 3)==3) { /* Node has dram populated */ if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */ dev = pDCTstat->dev_nbmisc; val = curBase << 8; - if(OB_ECCRedir) { + if (OB_ECCRedir) { val |= (1<<0); /* enable redirection */ } Set_NB32(dev, 0x5C, val); /* Dram Scrub Addr Low */ @@ -205,7 +205,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } /*if Node present */ } - if(mctGet_NVbits(NV_SyncOnUnEccEn)) + if (mctGet_NVbits(NV_SyncOnUnEccEn)) setSyncOnUnEccEn_D(pMCTstat, pDCTstatA); mctHookAfterECC(); @@ -239,8 +239,8 @@ static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat, reg = 0x40+(Node<<3); /* Dram Base Node 0 + index*/ val = Get_NB32(pDCTstat->dev_map, reg); /*WE/RE is checked because memory config may have been*/ - if((val & 3)==3) { /* Node has dram populated*/ - if( isDramECCEn_D(pDCTstat)) { + if ((val & 3)==3) { /* Node has dram populated*/ + if ( isDramECCEn_D(pDCTstat)) { /*if ECC is enabled on this dram*/ dev = pDCTstat->dev_nbmisc; reg = 0x44; /* MCA NB Configuration*/ @@ -274,7 +274,7 @@ static u32 GetScrubAddr_D(u32 Node) lo = Get_NB32(dev, regx); /* Scrub Addr High again, detect 32-bit wrap */ val = Get_NB32(dev, reg); - if(val != hi) { + if (val != hi) { hi = val; /* Scrub Addr Low again, if wrap occurred */ lo = Get_NB32(dev, regx); } @@ -295,16 +295,16 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat) u8 ch_end; u8 isDimmECCEn = 0; - if(pDCTstat->GangedMode) { + if (pDCTstat->GangedMode) { ch_end = 1; } else { ch_end = 2; } - for(i=0; i<ch_end; i++) { - if(pDCTstat->DIMMValidDCT[i] > 0){ + for (i=0; i<ch_end; i++) { + if (pDCTstat->DIMMValidDCT[i] > 0){ reg = 0x90 + i * 0x100; /* Dram Config Low */ val = Get_NB32(dev, reg); - if(val & (1<<DimmEcEn)) { + if (val & (1<<DimmEcEn)) { /* set local flag 'dram ecc capable' */ isDimmECCEn = 1; break; diff --git a/src/northbridge/amd/amdmct/mct/mctgr.c b/src/northbridge/amd/amdmct/mct/mctgr.c index 01d729d701..a13d4e2f0f 100644 --- a/src/northbridge/amd/amdmct/mct/mctgr.c +++ b/src/northbridge/amd/amdmct/mct/mctgr.c @@ -31,13 +31,13 @@ u32 mct_AdjustMemClkDis_GR(struct DCTStatStruc *pDCTstat, u32 dct, DramTimingLo = val; /* Dram Timing Low (owns Clock Enable bits) */ NewDramTimingLo = Get_NB32(dev, 0x88 + reg_off); - if(mctGet_NVbits(NV_AllMemClks)==0) { + if (mctGet_NVbits(NV_AllMemClks)==0) { /*Special Jedec SPD diagnostic bit - "enable all clocks"*/ - if(!(pDCTstat->Status & (1<<SB_DiagClks))) { - for(i=0; i<MAX_DIMMS_SUPPORTED; i++) { + if (!(pDCTstat->Status & (1<<SB_DiagClks))) { + for (i=0; i<MAX_DIMMS_SUPPORTED; i++) { val = 
Tab_GRCLKDis[i]; - if(val<8) { - if(!(pDCTstat->DIMMValidDCT[dct] & (1<<val))) { + if (val<8) { + if (!(pDCTstat->DIMMValidDCT[dct] & (1<<val))) { /* disable memclk */ NewDramTimingLo |= (1<<(i+1)); } @@ -61,7 +61,7 @@ u32 mct_AdjustDramConfigLo_GR(struct DCTStatStruc *pDCTstat, u32 dct, u32 val) ; mov cx,PA_NBMISC+44h ;MCA NB Configuration ; call Get_NB32n_D ; bt eax,22 ;EccEn - ; .if(CARRY?) + ; .if (CARRY?) ; btr eax,BurstLength32 ; .endif */ @@ -72,7 +72,7 @@ u32 mct_AdjustDramConfigLo_GR(struct DCTStatStruc *pDCTstat, u32 dct, u32 val) void mct_AdjustMemHoist_GR(struct DCTStatStruc *pDCTstat, u32 base, u32 HoleSize) { u32 val; - if(base >= pDCTstat->DCTHoleBase) { + if (base >= pDCTstat->DCTHoleBase) { u32 dev = pDCTstat->dev_dct; base += HoleSize; base >>= 27 - 8; diff --git a/src/northbridge/amd/amdmct/mct/mctmtr_d.c b/src/northbridge/amd/amdmct/mct/mctmtr_d.c index 06c642a865..5e91947e71 100644 --- a/src/northbridge/amd/amdmct/mct/mctmtr_d.c +++ b/src/northbridge/amd/amdmct/mct/mctmtr_d.c @@ -44,15 +44,15 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, */ val = mctGet_NVbits(NV_BottomIO); - if(val == 0) + if (val == 0) val++; Bottom32bIO = val << (24-8); val = pMCTstat->SysLimit + 1; - if(val <= _4GB_RJ8) { + if (val <= _4GB_RJ8) { Bottom40bIO = 0; - if(Bottom32bIO >= val) + if (Bottom32bIO >= val) Bottom32bIO = val; } else { Bottom40bIO = val; @@ -90,7 +90,7 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, /* Base */ /* Limit */ /* MtrrAddr */ - if(addr == -1) /* ran out of MTRRs?*/ + if (addr == -1) /* ran out of MTRRs?*/ pMCTstat->GStatus |= 1<<GSB_MTRRshort; pMCTstat->Sub4GCacheTop = Cache32bTOP<<8; @@ -104,7 +104,7 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, _WRMSR(addr, lo, hi); print_tx("\t CPUMemTyping: Bottom32bIO:", Bottom32bIO); print_tx("\t CPUMemTyping: Bottom40bIO:", Bottom40bIO); - if(Bottom40bIO) { + if (Bottom40bIO) { hi = Bottom40bIO >> 24; lo = Bottom40bIO << 8; if (mctSetNodeBoundary_D()) @@ -114,7 +114,7 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, } addr = 0xC0010010; /* SYS_CFG */ _RDMSR(addr, &lo, &hi); - if(Bottom40bIO) { + if (Bottom40bIO) { lo |= (1<<21); /* MtrrTom2En=1 */ lo |= (1<<22); /* Tom2ForceMemTypeWB */ } else { @@ -163,7 +163,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) val = curBase = Base; curLimit = *pLimit; addr = *pMtrrAddr; - while((addr >= 0x200) && (addr < 0x20C) && (val < *pLimit)) { + while ((addr >= 0x200) && (addr < 0x20C) && (val < *pLimit)) { /* start with "ascending" code path */ /* alignment (largest block size)*/ valx = 1 << bsf(curBase); @@ -171,7 +171,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) /* largest legal limit, given current non-zero range Base*/ valx += curBase; - if((curBase == 0) || (*pLimit < valx)) { + if ((curBase == 0) || (*pLimit < valx)) { /* flop direction to "descending" code path*/ valx = 1<<bsr(*pLimit - curBase); curSize = valx; @@ -194,7 +194,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) curBase = val; /* next Base = current Limit (loop exit)*/ addr++; /* next MTRR pair addr */ } - if(val < *pLimit) { + if (val < *pLimit) { *pLimit = val; addr = -1; } @@ -238,7 +238,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat addr = 0x200; lo = 0; hi = lo; - while( addr < 0x20C) { + while ( addr < 0x20C) { _WRMSR(addr, lo, hi); /* prog. 
MTRR with current region Mask */ addr++; /* next MTRR pair addr */ } @@ -248,7 +248,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat *======================================================================*/ print_tx("\t UMAMemTyping_D: Cache32bTOP:", Cache32bTOP); SetMTRRrangeWB_D(0, &Cache32bTOP, &addr); - if(addr == -1) /* ran out of MTRRs?*/ + if (addr == -1) /* ran out of MTRRs?*/ pMCTstat->GStatus |= 1<<GSB_MTRRshort; } } diff --git a/src/northbridge/amd/amdmct/mct/mctndi_d.c b/src/northbridge/amd/amdmct/mct/mctndi_d.c index 28c2751a56..32c319946b 100644 --- a/src/northbridge/amd/amdmct/mct/mctndi_d.c +++ b/src/northbridge/amd/amdmct/mct/mctndi_d.c @@ -66,7 +66,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, _SWHole = 0; } - if(!_SWHole) { + if (!_SWHole) { Base = Get_NB32(dev0, reg0); if (Base & 1) { NodesWmem++; @@ -85,7 +85,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, * are the same on all nodes */ DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114); - if(DctSelBase) { + if (DctSelBase) { DctSelBase <<= 8; if ( pDCTstat->Status & (1 << SB_HWHole)) { if (DctSelBase >= 0x1000000) { @@ -150,7 +150,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, Base = ((Nodes - 1) << 8) | 3; reg0 = 0x40; Node = 0; - while(Node < Nodes) { + while (Node < Nodes) { Set_NB32(dev0, reg0, Base); MemSize = MemSize0; MemSize--; @@ -164,7 +164,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, /* set base/limit to F1x120/124 per Node */ Node = 0; - while(Node < Nodes) { + while (Node < Nodes) { pDCTstat = pDCTstatA + Node; pDCTstat->NodeSysBase = 0; MemSize = MemSize0; @@ -185,7 +185,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, HoleBase = pMCTstat->HoleBase; if (Dct0MemSize >= HoleBase) { val = HWHoleSz; - if( Node == 0) { + if ( Node == 0) { val += Dct0MemSize; } } else { diff --git a/src/northbridge/amd/amdmct/mct/mctpro_d.c b/src/northbridge/amd/amdmct/mct/mctpro_d.c index a6d6bad164..95afebf30e 100644 --- a/src/northbridge/amd/amdmct/mct/mctpro_d.c +++ b/src/northbridge/amd/amdmct/mct/mctpro_d.c @@ -24,7 +24,7 @@ u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val) tmp = pDCTstat->LogicalCPUID; if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { val &= 0x0FFFFFFF; - if(pDCTstat->MAdimms[dct] > 1) + if (pDCTstat->MAdimms[dct] > 1) val |= 0x10000000; } @@ -42,7 +42,7 @@ u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val) uint64_t tmp; tmp = pDCTstat->LogicalCPUID; if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { - if(!(val & (3<<12) )) + if (!(val & (3<<12) )) val |= 1<<12; } return val; @@ -59,13 +59,13 @@ void mct_ForceAutoPrecharge_D(struct DCTStatStruc *pDCTstat, u32 dct) tmp = pDCTstat->LogicalCPUID; if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { - if(CheckNBCOFAutoPrechg(pDCTstat, dct)) { + if (CheckNBCOFAutoPrechg(pDCTstat, dct)) { dev = pDCTstat->dev_dct; reg_off = 0x100 * dct; reg = 0x90 + reg_off; /* Dram Configuration Lo */ val = Get_NB32(dev, reg); val |= 1<<ForceAutoPchg; - if(!pDCTstat->GangedMode) + if (!pDCTstat->GangedMode) val |= 1<<BurstLength32; Set_NB32(dev, reg, val); @@ -99,11 +99,11 @@ void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat, u32 val; u32 Node; - for(Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { + for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { struct DCTStatStruc *pDCTstat; pDCTstat = pDCTstatA + Node; - if(!pDCTstat->NodePresent) break; + if (!pDCTstat->NodePresent) break; tmp = 
pDCTstat->LogicalCPUID; if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { @@ -149,15 +149,15 @@ void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, dev = pDCTstat->dev_dct; index = 0; - for(Channel = 0; Channel<2; Channel++) { + for (Channel = 0; Channel<2; Channel++) { index_reg = 0x98 + 0x100 * Channel; val = Get_NB32_index_wait(dev, index_reg, 0x0d004007); val |= 0x3ff; Set_NB32_index_wait(dev, index_reg, 0x0d0f4f07, val); } - for(Channel = 0; Channel<2; Channel++) { - if(pDCTstat->GangedMode && Channel) + for (Channel = 0; Channel<2; Channel++) { + if (pDCTstat->GangedMode && Channel) break; reg_off = 0x100 * Channel; reg = 0x78 + reg_off; @@ -167,11 +167,11 @@ void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, Set_NB32(dev, reg, val); } - for(Channel = 0; Channel<2; Channel++) { + for (Channel = 0; Channel<2; Channel++) { reg_off = 0x100 * Channel; val = 0; index_reg = 0x98 + reg_off; - for( index = 0x30; index < (0x45 + 1); index++) { + for ( index = 0x30; index < (0x45 + 1); index++) { Set_NB32_index_wait(dev, index_reg, index, val); } } @@ -265,7 +265,7 @@ u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct) print_tx("NB COF:", valy >> NbDid); val = valy/valx; - if((val==3) && (valy%valx)) /* 3 < NClk/MemClk < 4 */ + if ((val==3) && (valy%valx)) /* 3 < NClk/MemClk < 4 */ ret = 1; return ret; @@ -286,8 +286,8 @@ void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct) if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { Speed = pDCTstat->Speed; /* MemClkFreq = 333MHz or 533MHz */ - if((Speed == 3) || (Speed == 2)) { - if(pDCTstat->GangedMode) { + if ((Speed == 3) || (Speed == 2)) { + if (pDCTstat->GangedMode) { ch_start = 0; ch_end = 2; } else { @@ -296,7 +296,7 @@ void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct) } dev = pDCTstat->dev_dct; index = 0x0D00E001; - for(ch=ch_start; ch<ch_end; ch++) { + for (ch=ch_start; ch<ch_end; ch++) { index_reg = 0x98 + 0x100 * ch; val = Get_NB32_index(dev, index_reg, 0x0D00E001); val &= ~(0xf0); @@ -332,7 +332,7 @@ static u8 mct_checkFenceHoleAdjust_D(struct MCTStatStruc *pMCTstat, if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) { if (pDCTstat->Direction == DQS_WRITEDIR) { if ((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) { - if(DQSDelay == 13) { + if (DQSDelay == 13) { if (*result == 0xFF) { for (ByteLane = 0; ByteLane < 8; ByteLane++) { pDCTstat->DQSDelay = 13; diff --git a/src/northbridge/amd/amdmct/mct/mctsrc.c b/src/northbridge/amd/amdmct/mct/mctsrc.c index 1881a37c54..510cf0dd4c 100644 --- a/src/northbridge/amd/amdmct/mct/mctsrc.c +++ b/src/northbridge/amd/amdmct/mct/mctsrc.c @@ -86,7 +86,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat, p_A = (u32 *)SetupDqsPattern_1PassB(pass); p_B = (u32 *)SetupDqsPattern_1PassA(pass); - for(i=0;i<16;i++) { + for (i=0;i<16;i++) { buf_a[i] = p_A[i]; buf_b[i] = p_B[i]; } @@ -99,7 +99,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat, void mct_TrainRcvrEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Pass) { - if(mct_checkNumberOfDqsRcvEn_1Pass(Pass)) + if (mct_checkNumberOfDqsRcvEn_1Pass(Pass)) dqsTrainRcvrEn_SW(pMCTstat, pDCTstat, Pass); } @@ -137,7 +137,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, dev = pDCTstat->dev_dct; ch_start = 0; - if(!pDCTstat->GangedMode) { + if (!pDCTstat->GangedMode) { ch_end = 2; } else { ch_end = 1; @@ -161,7 +161,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, 
print_t("TrainRcvrEn: 1\n"); cr4 = read_cr4(); - if(cr4 & ( 1 << 9)) { /* save the old value */ + if (cr4 & ( 1 << 9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1 << 9); /* OSFXSR enable SSE2 */ @@ -171,7 +171,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, msr = HWCR; _RDMSR(msr, &lo, &hi); //FIXME: Why use SSEDIS - if(lo & (1 << 17)) { /* save the old value */ + if (lo & (1 << 17)) { /* save the old value */ _Wrap32Dis = 1; } lo |= (1 << 17); /* HWCR.wrap32dis */ @@ -182,15 +182,15 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, _DisableDramECC = mct_DisableDimmEccEn_D(pMCTstat, pDCTstat); - if(pDCTstat->Speed == 1) { + if (pDCTstat->Speed == 1) { pDCTstat->T1000 = 5000; /* get the T1000 figure (cycle time (ns)*1K */ - } else if(pDCTstat->Speed == 2) { + } else if (pDCTstat->Speed == 2) { pDCTstat->T1000 = 3759; - } else if(pDCTstat->Speed == 3) { + } else if (pDCTstat->Speed == 3) { pDCTstat->T1000 = 3003; - } else if(pDCTstat->Speed == 4) { + } else if (pDCTstat->Speed == 4) { pDCTstat->T1000 = 2500; - } else if(pDCTstat->Speed == 5) { + } else if (pDCTstat->Speed == 5) { pDCTstat->T1000 = 1876; } else { pDCTstat->T1000 = 0; @@ -222,22 +222,22 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, print_debug_dqs("\t\tTrainRcvEnd52: index ", Addl_Index, 2); - if(!mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver)) { + if (!mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver)) { print_t("\t\t\tRank not enabled_D\n"); continue; } TestAddr0 = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, Channel, Receiver, &valid); - if(!valid) { /* Address not supported on current CS */ + if (!valid) { /* Address not supported on current CS */ print_t("\t\t\tAddress not supported on current CS\n"); continue; } TestAddr0B = TestAddr0 + (BigPagex8_RJ8 << 3); - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) { TestAddr1 = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, Channel, Receiver+1, &valid); - if(!valid) { /* Address not supported on current CS */ + if (!valid) { /* Address not supported on current CS */ print_t("\t\t\tAddress not supported on current CS+1\n"); continue; } @@ -266,11 +266,11 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, pDCTstat->DqsRcvEn_Saved = 0; - while(RcvrEnDly < RcvrEnDlyLimit) { /* sweep Delay value here */ + while (RcvrEnDly < RcvrEnDlyLimit) { /* sweep Delay value here */ print_debug_dqs("\t\t\tTrainRcvEn541: RcvrEnDly ", RcvrEnDly, 3); /* callback not required - if(mct_AdjustDelay_D(pDCTstat, RcvrEnDly)) + if (mct_AdjustDelay_D(pDCTstat, RcvrEnDly)) goto skipDly; */ @@ -278,7 +278,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, and odd steps alternate. The pointers to the patterns will be swaped at the end of the loop so that they correspond. 
*/ - if(RcvrEnDly & 1) { + if (RcvrEnDly & 1) { PatternA = 1; PatternB = 0; } else { @@ -289,7 +289,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0, PatternA); /* rank 0 of DIMM, testpattern 0 */ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0B, PatternB); /* rank 0 of DIMM, testpattern 1 */ - if(_2Ranks) { + if (_2Ranks) { mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1, PatternA); /*rank 1 of DIMM, testpattern 0 */ mct_Write1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1B, PatternB); /*rank 1 of DIMM, testpattern 1 */ } @@ -309,7 +309,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, // != 0x00 mean pass - if(Test0 == DQS_PASS) { + if (Test0 == DQS_PASS) { mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr0B); /*cache fills */ /* ROM vs cache compare */ Test1 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr0B, Channel, PatternB, Pass); @@ -318,11 +318,11 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, print_debug_dqs("\t\t\tTrainRcvEn543: Test1 result ", Test1, 3); - if(Test1 == DQS_PASS) { + if (Test1 == DQS_PASS) { CurrTestSide0 = DQS_PASS; } } - if(_2Ranks) { + if (_2Ranks) { mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1); /*cache fills */ /* ROM vs cache compare */ Test0 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr1, Channel, PatternA, Pass); @@ -331,7 +331,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, print_debug_dqs("\t\t\tTrainRcvEn544: Test0 result ", Test0, 3); - if(Test0 == DQS_PASS) { + if (Test0 == DQS_PASS) { mct_Read1LTestPattern_D(pMCTstat, pDCTstat, TestAddr1B); /*cache fills */ /* ROM vs cache compare */ Test1 = mct_CompareTestPatternQW0_D(pMCTstat, pDCTstat, TestAddr1B, Channel, PatternB, Pass); @@ -339,13 +339,13 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, ResetDCTWrPtr_D(dev, index_reg, Addl_Index); print_debug_dqs("\t\t\tTrainRcvEn545: Test1 result ", Test1, 3); - if(Test1 == DQS_PASS) { + if (Test1 == DQS_PASS) { CurrTestSide1 = DQS_PASS; } } } - if(_2Ranks) { + if (_2Ranks) { if ((CurrTestSide0 == DQS_PASS) && (CurrTestSide1 == DQS_PASS)) { CurrTest = DQS_PASS; } @@ -358,7 +358,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, valid = mct_SavePassRcvEnDly_D(pDCTstat, RcvrEnDly, Channel, Receiver, Pass); /* Break(1:RevF,2:DR) or not(0) FIXME: This comment deosn't make sense */ - if(valid == 2 || (LastTest == DQS_FAIL && valid == 1)) { + if (valid == 2 || (LastTest == DQS_FAIL && valid == 1)) { RcvrEnDlyRmin = RcvrEnDly; break; } @@ -384,14 +384,14 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDly ", RcvrEnDly, 2); print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDlyRmin ", RcvrEnDlyRmin, 3); print_debug_dqs("\t\tTrainRcvEn61: RcvrEnDlyLimit ", RcvrEnDlyLimit, 3); - if(RcvrEnDlyRmin == RcvrEnDlyLimit) { + if (RcvrEnDlyRmin == RcvrEnDlyLimit) { /* no passing window */ pDCTstat->ErrStatus |= 1 << SB_NORCVREN; Errors |= 1 << SB_NORCVREN; pDCTstat->ErrCode = SC_FatalErr; } - if(RcvrEnDly > (RcvrEnDlyLimit - 1)) { + if (RcvrEnDly > (RcvrEnDlyLimit - 1)) { /* passing window too narrow, too far delayed*/ pDCTstat->ErrStatus |= 1 << SB_SmallRCVR; Errors |= 1 << SB_SmallRCVR; @@ -406,12 +406,12 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, mct_SetFinalRcvrEnDly_D(pDCTstat, RcvrEnDly, Final_Value, Channel, Receiver, dev, index_reg, Addl_Index, Pass); - if(pDCTstat->ErrStatus & (1 << SB_SmallRCVR)) { + if 
(pDCTstat->ErrStatus & (1 << SB_SmallRCVR)) { Errors |= 1 << SB_SmallRCVR; } RcvrEnDly += Pass1MemClkDly; - if(RcvrEnDly > CTLRMaxDelay) { + if (RcvrEnDly > CTLRMaxDelay) { CTLRMaxDelay = RcvrEnDly; } @@ -430,7 +430,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, ResetDCTWrPtr_D(dev, index_reg, Addl_Index); - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC); } @@ -440,13 +440,13 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, mct_DisableDQSRcvEn_D(pDCTstat); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { msr = HWCR; _RDMSR(msr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ _WRMSR(msr, lo, hi); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR */ write_cr4(cr4); @@ -456,7 +456,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, { u8 Channel; printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n"); - for(Channel = 0; Channel<2; Channel++) { + for (Channel = 0; Channel<2; Channel++) { printk(BIOS_DEBUG, "Channel: %02x: %02x\n", Channel, pDCTstat->CH_MaxRdLat[Channel]); } } @@ -470,9 +470,9 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat, u8 *p; printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n"); - for(Channel = 0; Channel < 2; Channel++) { + for (Channel = 0; Channel < 2; Channel++) { printk(BIOS_DEBUG, "Channel: %02x\n", Channel); - for(Receiver = 0; Receiver<8; Receiver+=2) { + for (Receiver = 0; Receiver<8; Receiver+=2) { printk(BIOS_DEBUG, "\t\tReceiver: %02x: ", Receiver); p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver>>1]; for (i=0;i<8; i++) { @@ -554,7 +554,7 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, u8 *p; u32 val; - if(RcvrEnDly == 0xFE) { + if (RcvrEnDly == 0xFE) { /*set the boudary flag */ pDCTstat->Status |= 1 << SB_DQSRcvLimit; } @@ -562,8 +562,8 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, /* DimmOffset not needed for CH_D_B_RCVRDLY array */ - for(i=0; i < 8; i++) { - if(FinalValue) { + for (i=0; i < 8; i++) { + if (FinalValue) { /*calculate dimm offset */ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1]; RcvrEnDly = p[i]; @@ -574,7 +574,7 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, index = Table_DQSRcvEn_Offset[i >> 1]; index += Addl_Index; /* DIMMx DqsRcvEn byte0 */ val = Get_NB32_index_wait(dev, index_reg, index); - if(i & 1) { + if (i & 1) { /* odd byte lane */ val &= ~(0xFF << 16); val |= (RcvrEnDly << 16); @@ -598,7 +598,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQ u32 val; u32 valx; - if(pDCTstat->GangedMode) + if (pDCTstat->GangedMode) Channel = 0; dev = pDCTstat->dev_dct; @@ -613,7 +613,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQ * add 1 MEMCLK to the sub-total. 
*/ val = Get_NB32(dev, 0x90 + reg_off); - if(!(val & (1 << UnBuffDimm))) + if (!(val & (1 << UnBuffDimm))) SubTotal += 2; /* If the address prelaunch is setup for 1/2 MEMCLKs then @@ -621,7 +621,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQ * if (AddrCmdSetup || CsOdtSetup || CkeSetup) then K := K + 2; */ val = Get_NB32_index_wait(dev, index_reg, 0x04); - if(!(val & 0x00202020)) + if (!(val & 0x00202020)) SubTotal += 1; else SubTotal += 2; @@ -675,7 +675,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u8 DQ SubTotal += 5; pDCTstat->CH_MaxRdLat[Channel] = SubTotal; - if(pDCTstat->GangedMode) { + if (pDCTstat->GangedMode) { pDCTstat->CH_MaxRdLat[1] = SubTotal; } @@ -708,25 +708,25 @@ static u8 mct_SavePassRcvEnDly_D(struct DCTStatStruc *pDCTstat, /* cmp if there has new DqsRcvEnDly to be recorded */ mask_Pass = pDCTstat->DqsRcvEn_Pass; - if(Pass == SecondPass) { + if (Pass == SecondPass) { mask_Pass = ~mask_Pass; } mask_Saved = pDCTstat->DqsRcvEn_Saved; - if(mask_Pass != mask_Saved) { + if (mask_Pass != mask_Saved) { /* find desired stack offset according to channel/dimm/byte */ - if(Pass == SecondPass) { + if (Pass == SecondPass) { // FIXME: SecondPass is never used for Barcelona p = pDCTstat->CH_D_B_RCVRDLY_1[Channel][receiver>>1]; p = 0; // Keep the compiler happy. } else { mask_Saved &= mask_Pass; p = pDCTstat->CH_D_B_RCVRDLY[Channel][receiver>>1]; } - for(i=0; i < 8; i++) { + for (i=0; i < 8; i++) { /* cmp per byte lane */ - if(mask_Pass & (1 << i)) { - if(!(mask_Saved & (1 << i))) { + if (mask_Pass & (1 << i)) { + if (!(mask_Saved & (1 << i))) { /* save RcvEnDly to stack, according to the related Dimm/byte lane */ p[i] = (u8)rcvrEnDly; @@ -756,8 +756,8 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, u8 value; - if(Pass == FirstPass) { - if(pattern==1) { + if (Pass == FirstPass) { + if (pattern==1) { test_buf = (u8 *)TestPattern1_D; } else { test_buf = (u8 *)TestPattern0_D; @@ -769,7 +769,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, SetUpperFSbase(addr); addr <<= 8; - if((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) { + if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) { addr += 8; /* second channel */ test_buf += 8; } @@ -792,7 +792,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, /* if first pass, at least one byte lane pass * ,then DQS_PASS=1 and will set to related reg. */ - if(pDCTstat->DqsRcvEn_Pass != 0) { + if (pDCTstat->DqsRcvEn_Pass != 0) { result = DQS_PASS; } else { result = DQS_FAIL; @@ -802,7 +802,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, /* if second pass, at least one byte lane fail * ,then DQS_FAIL=1 and will set to related reg. */ - if(pDCTstat->DqsRcvEn_Pass != 0xFF) { + if (pDCTstat->DqsRcvEn_Pass != 0xFF) { result = DQS_FAIL; } else { result = DQS_PASS; @@ -812,7 +812,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, /* if second pass, we can't find the fail until FFh, * then let it fail to save the final delay */ - if((Pass == SecondPass) && (pDCTstat->Status & (1 << SB_DQSRcvLimit))) { + if ((Pass == SecondPass) && (pDCTstat->Status & (1 << SB_DQSRcvLimit))) { result = DQS_FAIL; pDCTstat->DqsRcvEn_Pass = 0; } @@ -820,7 +820,7 @@ static u8 mct_CompareTestPatternQW0_D(struct MCTStatStruc *pMCTstat, /* second pass needs to be inverted * FIXME? this could be inverted in the above code to start with... 
*/ - if(Pass == SecondPass) { + if (Pass == SecondPass) { if (result == DQS_PASS) { result = DQS_FAIL; } else if (result == DQS_FAIL) { /* FIXME: doesn't need to be else if */ @@ -843,7 +843,7 @@ static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, * Read Position is 1/2 Memclock Delay */ u8 i; - for(i=0;i<2; i++){ + for (i=0;i<2; i++){ InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i); } } @@ -867,8 +867,8 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, // FIXME: add Cx support dword = 0x00000000; - for(i=1; i<=3; i++) { - for(j=0; j<dn; j++) + for (i=1; i<=3; i++) { + for (j=0; j<dn; j++) /* DIMM0 Write Data Timing Low */ /* DIMM0 Write ECC Timing */ Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword); @@ -876,14 +876,14 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, /* errata #180 */ dword = 0x2f2f2f2f; - for(i=5; i<=6; i++) { - for(j=0; j<dn; j++) + for (i=5; i<=6; i++) { + for (j=0; j<dn; j++) /* DIMM0 Read DQS Timing Control Low */ Set_NB32_index_wait(dev, index_reg, i + 0x100 * j, dword); } dword = 0x0000002f; - for(j=0; j<dn; j++) + for (j=0; j<dn; j++) /* DIMM0 Read DQS ECC Timing Control */ Set_NB32_index_wait(dev, index_reg, 7 + 0x100 * j, dword); } @@ -903,7 +903,7 @@ void SetEccDQSRcvrEn_D(struct DCTStatStruc *pDCTstat, u8 Channel) index = 0x12; p = pDCTstat->CH_D_BC_RCVRDLY[Channel]; print_debug_dqs("\t\tSetEccDQSRcvrPos: Channel ", Channel, 2); - for(ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { + for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { val = p[ChipSel>>1]; Set_NB32_index_wait(dev, index_reg, index, val); print_debug_dqs_pair("\t\tSetEccDQSRcvrPos: ChipSel ", @@ -925,7 +925,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, EccDQSScale = pDCTstat->CH_EccDQSScale[Channel]; for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, ChipSel)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, ChipSel)) { u8 *p; p = pDCTstat->CH_D_B_RCVRDLY[Channel][ChipSel>>1]; @@ -936,7 +936,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, * 2nd most like ECC byte lane */ val1 = p[(EccDQSLike>>8) & 0x07]; - if(val0 > val1) { + if (val0 > val1) { val = val0 - val1; } else { val = val1 - val0; @@ -945,7 +945,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, val *= ~EccDQSScale; val >>= 8; // /256 - if(val0 > val1) { + if (val0 > val1) { val -= val1; } else { val += val0; @@ -969,7 +969,7 @@ void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, if (!pDCTstat->NodePresent) break; if (pDCTstat->DCTSysLimit) { - for(i=0; i<2; i++) + for (i=0; i<2; i++) CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i); } } @@ -987,7 +987,7 @@ void phyAssistedMemFnceTraining(struct MCTStatStruc *pMCTstat, while (Node < MAX_NODES_SUPPORTED) { pDCTstat = pDCTstatA + Node; - if(pDCTstat->DCTSysLimit) { + if (pDCTstat->DCTSysLimit) { fenceDynTraining_D(pMCTstat, pDCTstat, 0); fenceDynTraining_D(pMCTstat, pDCTstat, 1); } diff --git a/src/northbridge/amd/amdmct/mct/mctsrc1p.c b/src/northbridge/amd/amdmct/mct/mctsrc1p.c index 9e069520aa..e059e1eff1 100644 --- a/src/northbridge/amd/amdmct/mct/mctsrc1p.c +++ b/src/northbridge/amd/amdmct/mct/mctsrc1p.c @@ -50,7 +50,7 @@ static u8 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel, MaxValue = 0; p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1]; - for(i=0; i < 8; i++) { + for (i=0; i < 8; i++) { /* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/ val = p[i]; 
/* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/ @@ -77,7 +77,7 @@ u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass) { u8 ret; ret = 0; - if((pDCTstat->DqsRcvEn_Pass == 0xff) && (pass== FirstPass)) + if ((pDCTstat->DqsRcvEn_Pass == 0xff) && (pass== FirstPass)) ret = 2; return ret; } diff --git a/src/northbridge/amd/amdmct/mct/mctsrc2p.c b/src/northbridge/amd/amdmct/mct/mctsrc2p.c index daf2bdfe86..bd3c503097 100644 --- a/src/northbridge/amd/amdmct/mct/mctsrc2p.c +++ b/src/northbridge/amd/amdmct/mct/mctsrc2p.c @@ -24,7 +24,7 @@ u8 mct_checkNumberOfDqsRcvEn_Pass(u8 pass) u32 SetupDqsPattern_PassA(u8 Pass) { u32 ret; - if(Pass == FirstPass) + if (Pass == FirstPass) ret = (u32) TestPattern1_D; else ret = (u32) TestPattern2_D; @@ -36,7 +36,7 @@ u32 SetupDqsPattern_PassA(u8 Pass) u32 SetupDqsPattern_PassB(u8 Pass) { u32 ret; - if(Pass == FirstPass) + if (Pass == FirstPass) ret = (u32) TestPattern0_D; else ret = (u32) TestPattern2_D; @@ -66,12 +66,12 @@ u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, val = p[i]; // print_tx("mct_Get_Start_RcvrEnDly_Pass: i:", i); // print_tx("mct_Get_Start_RcvrEnDly_Pass: val:", val); - if(val > max) { + if (val > max) { max = val; } } RcvrEnDly = max; -// while(1) {; } +// while (1) {; } // RcvrEnDly += secPassOffset; //FIXME Why } @@ -100,7 +100,7 @@ u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, //FIXME: which byte? p_1 = pDCTstat->B_RCVRDLY_1; // p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1]; - for(i=0; i<bn; i++) { + for (i=0; i<bn; i++) { val = p[i]; /* left edge */ if (val != (RcvrEnDlyLimit - 1)) { @@ -120,7 +120,7 @@ u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel)); } } else { - for(i=0; i < bn; i++) { + for (i=0; i < bn; i++) { val = p[i]; /* Add 1/2 Memlock delay */ //val += Pass1MemClkDly; diff --git a/src/northbridge/amd/amdmct/mct/mcttmrl.c b/src/northbridge/amd/amdmct/mct/mcttmrl.c index ef37e3b419..0eb3c61b49 100644 --- a/src/northbridge/amd/amdmct/mct/mcttmrl.c +++ b/src/northbridge/amd/amdmct/mct/mcttmrl.c @@ -79,7 +79,7 @@ static u32 SetupMaxRdPattern(struct MCTStatStruc *pMCTstat, buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0)); - for(i = 0; i < (16 * 3); i++) { + for (i = 0; i < (16 * 3); i++) { buf[i] = TestMaxRdLAtPattern_D[i]; } @@ -93,14 +93,14 @@ void TrainMaxReadLatency_D(struct MCTStatStruc *pMCTstat, { u8 Node; - for(Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { + for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { struct DCTStatStruc *pDCTstat; pDCTstat = pDCTstatA + Node; - if(!pDCTstat->NodePresent) + if (!pDCTstat->NodePresent) break; - if(pDCTstat->DCTSysLimit) + if (pDCTstat->DCTSysLimit) maxRdLatencyTrain_D(pMCTstat, pDCTstat); } } @@ -124,7 +124,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, u32 pattern_buf; cr4 = read_cr4(); - if(cr4 & (1<<9)) { /* save the old value */ + if (cr4 & (1<<9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1<<9); /* OSFXSR enable SSE2 */ @@ -132,7 +132,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, addr = HWCR; _RDMSR(addr, &lo, &hi); - if(lo & (1<<17)) { /* save the old value */ + if (lo & (1<<17)) { /* save the old value */ _Wrap32Dis = 1; } lo |= (1<<17); /* HWCR.wrap32dis */ @@ -149,11 +149,11 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1); pDCTstat->Channel = Channel; - if( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel) + if ( 
(pDCTstat->Status & (1 << SB_128bitmode)) && Channel) break; /*if ganged mode, skip DCT 1 */ TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly, &valid); - if(!valid) /* Address not supported on current CS */ + if (!valid) /* Address not supported on current CS */ continue; /* rank 1 of DIMM, testpattern 0 */ WriteMaxRdLat1CLTestPattern_D(pattern_buf, TestAddr0); @@ -161,10 +161,10 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, MaxRdLatDly = mct_GetStartMaxRdLat_D(pMCTstat, pDCTstat, Channel, RcvrEnDly, &Margin); print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly start ", MaxRdLatDly, 2); print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly Margin ", Margin, 2); - while(MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */ + while (MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */ mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly); ReadMaxRdLat1CLTestPattern_D(TestAddr0); - if( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS) + if ( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS) break; SetTargetWTIO_D(TestAddr0); FlushMaxRdLatTestPattern_D(TestAddr0); @@ -175,17 +175,17 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly + Margin); } - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { addr = HWCR; _RDMSR(addr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ _WRMSR(addr, lo, hi); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR */ write_cr4(cr4); @@ -195,7 +195,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, { u8 Channel; printk(BIOS_DEBUG, "maxRdLatencyTrain: CH_MaxRdLat:\n"); - for(Channel = 0; Channel<2; Channel++) { + for (Channel = 0; Channel<2; Channel++) { printk(BIOS_DEBUG, "Channel: %02x: %02x\n", Channel, pDCTstat->CH_MaxRdLat[Channel]); } } @@ -253,7 +253,7 @@ static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr) print_debug_dqs_pair("\t\t\t\t\t\ttest_buf = ", (u32)test_buf, " value = ", val_test, 5); print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ", addr_lo, " value = ", val, 5); - if(val != val_test) { + if (val != val_test) { ret = DQS_FAIL; break; } @@ -281,7 +281,7 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, bn = 8; - if(pDCTstat->Status & (1 << SB_128bitmode)) { + if (pDCTstat->Status & (1 << SB_128bitmode)) { ch_start = 0; ch_end = 2; } else { @@ -291,12 +291,12 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, *valid = 0; - for(ch = ch_start; ch < ch_end; ch++) { - for(d=0; d<4; d++) { - for(Byte = 0; Byte<bn; Byte++) { + for (ch = ch_start; ch < ch_end; ch++) { + for (d=0; d<4; d++) { + for (Byte = 0; Byte<bn; Byte++) { u8 tmp; tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte]; - if(tmp>Max) { + if (tmp>Max) { Max = tmp; Channel_Max = Channel; d_Max = d; @@ -305,11 +305,11 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, } } - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1)) { TestAddr0 = mct_GetMCTSysAddr_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1, valid); } - if(*valid) + if (*valid) *MaxRcvrEnDly = Max; return TestAddr0; @@ -328,7 +328,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat, u32 reg_off; u32 dev; - if(pDCTstat->GangedMode) + if (pDCTstat->GangedMode) Channel = 0; 
index_reg = 0x98 + 0x100 * Channel; @@ -342,14 +342,14 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat, /* If registered DIMMs are being used then add 1 MEMCLK to the sub-total*/ val = Get_NB32(dev, 0x90 + reg_off); - if(!(val & (1 << UnBuffDimm))) + if (!(val & (1 << UnBuffDimm))) SubTotal += 2; /*If the address prelaunch is setup for 1/2 MEMCLKs then add 1, * else add 2 to the sub-total. if (AddrCmdSetup || CsOdtSetup * || CkeSetup) then K := K + 2; */ val = Get_NB32_index_wait(dev, index_reg, 0x04); - if(!(val & 0x00202020)) + if (!(val & 0x00202020)) SubTotal += 1; else SubTotal += 2; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c index 7aee892b36..0c37366845 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d.c @@ -4313,7 +4313,7 @@ static void ClearDCT_D(struct MCTStatStruc *pMCTstat, reg_end = 0xA4; } - while(reg < reg_end) { + while (reg < reg_end) { if ((reg & 0xFF) == 0x84) { if (is_fam15h()) { val = Get_NB32_DCT(dev, dct, reg); @@ -5252,7 +5252,7 @@ static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat, dword = 0; byte = 0xFF; - while(dword < MAX_CS_SUPPORTED) { + while (dword < MAX_CS_SUPPORTED) { if (pDCTstat->CSPresent & (1<<dword)){ /* re-enable clocks for the enabled CS */ val = p[dword]; @@ -8130,7 +8130,7 @@ static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat, addr = HWCR; _RDMSR(addr, &lo, &hi); - if(lo & (1<<17)) { /* save the old value */ + if (lo & (1<<17)) { /* save the old value */ wrap32dis = 1; } lo |= (1<<17); /* HWCR.wrap32dis */ @@ -8158,7 +8158,7 @@ static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat, } } - if(!wrap32dis) { + if (!wrap32dis) { addr = HWCR; _RDMSR(addr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h index 0d9c8863db..a7fac8f390 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h +++ b/src/northbridge/amd/amdmct/mct_ddr3/mct_d_gcc.h @@ -57,8 +57,8 @@ static u32 bsr(u32 x) u8 i; u32 ret = 0; - for(i=31; i>0; i--) { - if(x & (1<<i)) { + for (i=31; i>0; i--) { + if (x & (1<<i)) { ret = i; break; } @@ -73,8 +73,8 @@ static u32 bsf(u32 x) u8 i; u32 ret = 32; - for(i=0; i<32; i++) { - if(x & (1<<i)) { + for (i=0; i<32; i++) { + if (x & (1<<i)) { ret = i; break; } @@ -301,7 +301,7 @@ static u32 stream_to_int(u8 *p) val = 0; - for(i=3; i>=0; i--) { + for (i=3; i>=0; i--) { val <<= 8; valx = *(p+i); val |= valx; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctardk5.c b/src/northbridge/amd/amdmct/mct_ddr3/mctardk5.c index f829f67ed0..3a14fd86b1 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctardk5.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctardk5.c @@ -65,12 +65,12 @@ static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload, if (MAAdimms == 1) { *ODC_CTL = 0x00113222; *CMDmode = 1; - } else /* if(MAAdimms == 0) */ { - if(Speed == 4) { + } else /* if (MAAdimms == 0) */ { + if (Speed == 4) { *CMDmode = 1; - } else if(Speed == 5) { + } else if (Speed == 5) { *CMDmode = 1; - } else if(Speed == 6) { + } else if (Speed == 6) { *CMDmode = 2; } else { *CMDmode = 2; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c index 891f1d6710..3f013088ab 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctardk6.c @@ -59,7 +59,7 @@ static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 
MAAload, *CMDmode = 1; if (mctGet_NVbits(NV_MAX_DIMMS) == 4) { - if(Speed == 4) { + if (Speed == 4) { *AddrTmgCTL = 0x00000000; } else if (Speed == 5) { *AddrTmgCTL = 0x003C3C3C; @@ -77,7 +77,7 @@ static void Get_ChannelPS_Cfg0_D( u8 MAAdimms, u8 Speed, u8 MAAload, *AddrTmgCTL = 0x00353935; } } else { - if(Speed == 4) { + if (Speed == 4) { *AddrTmgCTL = 0x00000000; if (MAAdimms == 3) *AddrTmgCTL = 0x00380038; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c index da7ce165ed..3f56765e4e 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctcsi_d.c @@ -62,7 +62,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, val = Get_NB32_DCT(dev, dct, reg); val >>= (ChipSel>>1)<<2; val &= 0x0f; - if(EnChipSels == 1) + if (EnChipSels == 1) BankEncd = val; else /*If number of Rows/Columns not equal, skip */ @@ -77,13 +77,13 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, } if (DoIntlv) { - if(!_CsIntCap) { + if (!_CsIntCap) { pDCTstat->ErrStatus |= 1<<SB_BkIntDis; DoIntlv = 0; } } - if(DoIntlv) { + if (DoIntlv) { val = Tab_int_D[BankEncd]; if (pDCTstat->Status & (1<<SB_128bitmode)) val++; @@ -111,7 +111,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, val |= val_hi; Set_NB32_DCT(dev, dct, reg, val); - if(ChipSel & 1) + if (ChipSel & 1) continue; reg = 0x60 + ((ChipSel>>1)<<2); /* Dram CS Mask 0 */ diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c index 8974a08e6e..06a70e6ebb 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctdqs_d.c @@ -476,7 +476,7 @@ static void TrainDQSRdWrPos_D_Fam10(struct MCTStatStruc *pMCTstat, for (; Receiver < 8; Receiver++) { if ((Receiver & 0x1) == 0) { /* Even rank of DIMM */ - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) dual_rank = 1; else dual_rank = 0; @@ -1296,7 +1296,7 @@ static uint8_t TrainDQSRdWrPos_D_Fam15(struct MCTStatStruc *pMCTstat, dimm = (Receiver >> 1); if ((Receiver & 0x1) == 0) { /* Even rank of DIMM */ - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver+1)) + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver+1)) dual_rank = 1; else dual_rank = 0; @@ -2379,7 +2379,7 @@ u32 mct_GetMCTSysAddr_D(struct MCTStatStruc *pMCTstat, /* New stuff */ val += ((1 << 21) >> 8); /* Add 2MB offset to avoid compat area */ if (val >= MCT_TRNG_KEEPOUT_START) { - while(val < MCT_TRNG_KEEPOUT_END) + while (val < MCT_TRNG_KEEPOUT_END) val += (1 << (15-8)); /* add 32K */ } diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c index c0ae440a64..5d31849fb4 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctecc_d.c @@ -152,12 +152,12 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) val = Get_NB32(dev, reg); /* WE/RE is checked */ - if((val & 3)==3) { /* Node has dram populated */ + if ((val & 3)==3) { /* Node has dram populated */ /* Negate 'all nodes/dimms ECC' flag if non ecc memory populated */ - if( pDCTstat->Status & (1<<SB_ECCDIMMs)) { + if ( pDCTstat->Status & (1<<SB_ECCDIMMs)) { LDramECC = isDramECCEn_D(pDCTstat); - if(pDCTstat->ErrCode != SC_RunningOK) { + if (pDCTstat->ErrCode != SC_RunningOK) { pDCTstat->Status &= ~(1 << SB_ECCDIMMs); if (!OB_NBECC) { pDCTstat->ErrStatus |= (1 << 
SB_DramECCDis); @@ -168,7 +168,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } else { AllECC = 0; } - if(LDramECC) { /* if ECC is enabled on this dram */ + if (LDramECC) { /* if ECC is enabled on this dram */ if (OB_NBECC) { mct_EnableDatIntlv_D(pMCTstat, pDCTstat); val = Get_NB32(pDCTstat->dev_dct, 0x110); @@ -194,7 +194,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } /* if Node present */ } - if(AllECC) + if (AllECC) pMCTstat->GStatus |= 1<<GSB_ECCDIMMs; else pMCTstat->GStatus &= ~(1<<GSB_ECCDIMMs); @@ -210,7 +210,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) val = Get_NB32(pDCTstat->dev_map, reg); curBase = val & 0xffff0000; /*WE/RE is checked because memory config may have been */ - if((val & 3)==3) { /* Node has dram populated */ + if ((val & 3)==3) { /* Node has dram populated */ if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */ dev = pDCTstat->dev_nbmisc; val = curBase << 8; @@ -292,7 +292,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA) } } - if(mctGet_NVbits(NV_SyncOnUnEccEn)) + if (mctGet_NVbits(NV_SyncOnUnEccEn)) setSyncOnUnEccEn_D(pMCTstat, pDCTstatA); mctHookAfterECC(); @@ -325,8 +325,8 @@ static void setSyncOnUnEccEn_D(struct MCTStatStruc *pMCTstat, reg = 0x40+(Node<<3); /* Dram Base Node 0 + index*/ val = Get_NB32(pDCTstat->dev_map, reg); /*WE/RE is checked because memory config may have been*/ - if((val & 3)==3) { /* Node has dram populated*/ - if( isDramECCEn_D(pDCTstat)) { + if ((val & 3)==3) { /* Node has dram populated*/ + if ( isDramECCEn_D(pDCTstat)) { /*if ECC is enabled on this dram*/ dev = pDCTstat->dev_nbmisc; reg = 0x44; /* MCA NB Configuration*/ @@ -348,16 +348,16 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat) u8 ch_end; u8 isDimmECCEn = 0; - if(pDCTstat->GangedMode) { + if (pDCTstat->GangedMode) { ch_end = 1; } else { ch_end = 2; } - for(i=0; i<ch_end; i++) { - if(pDCTstat->DIMMValidDCT[i] > 0){ + for (i=0; i<ch_end; i++) { + if (pDCTstat->DIMMValidDCT[i] > 0){ reg = 0x90; /* Dram Config Low */ val = Get_NB32_DCT(dev, i, reg); - if(val & (1<<DimmEcEn)) { + if (val & (1<<DimmEcEn)) { /* set local flag 'dram ecc capable' */ isDimmECCEn = 1; break; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c index b7c0476e57..8ed2befb34 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctmtr_d.c @@ -43,15 +43,15 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, */ val = mctGet_NVbits(NV_BottomIO); - if(val == 0) + if (val == 0) val++; Bottom32bIO = val << (24-8); val = pMCTstat->SysLimit + 1; - if(val <= _4GB_RJ8) { + if (val <= _4GB_RJ8) { Bottom40bIO = 0; - if(Bottom32bIO >= val) + if (Bottom32bIO >= val) Bottom32bIO = val; } else { Bottom40bIO = val; @@ -89,7 +89,7 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, /* Base */ /* Limit */ /* MtrrAddr */ - if(addr == -1) /* ran out of MTRRs?*/ + if (addr == -1) /* ran out of MTRRs?*/ pMCTstat->GStatus |= 1<<GSB_MTRRshort; pMCTstat->Sub4GCacheTop = Cache32bTOP<<8; @@ -103,7 +103,7 @@ void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, _WRMSR(addr, lo, hi); printk(BIOS_DEBUG, "\t CPUMemTyping: Bottom32bIO:%x\n", Bottom32bIO); printk(BIOS_DEBUG, "\t CPUMemTyping: Bottom40bIO:%x\n", Bottom40bIO); - if(Bottom40bIO) { + if (Bottom40bIO) { hi = Bottom40bIO >> 24; lo = Bottom40bIO << 8; addr += 3; /* TOM2 */ @@ -111,7 +111,7 @@ void 
CPUMemTyping_D(struct MCTStatStruc *pMCTstat, } addr = 0xC0010010; /* SYS_CFG */ _RDMSR(addr, &lo, &hi); - if(Bottom40bIO) { + if (Bottom40bIO) { lo |= (1<<21); /* MtrrTom2En=1 */ lo |= (1<<22); /* Tom2ForceMemTypeWB */ } else { @@ -158,7 +158,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) val = curBase = Base; curLimit = *pLimit; addr = *pMtrrAddr; - while((addr >= 0x200) && (addr < 0x20C) && (val < *pLimit)) { + while ((addr >= 0x200) && (addr < 0x20C) && (val < *pLimit)) { /* start with "ascending" code path */ /* alignment (largest block size)*/ valx = 1 << bsf(curBase); @@ -166,7 +166,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) /* largest legal limit, given current non-zero range Base*/ valx += curBase; - if((curBase == 0) || (*pLimit < valx)) { + if ((curBase == 0) || (*pLimit < valx)) { /* flop direction to "descending" code path*/ valx = 1<<bsr(*pLimit - curBase); curSize = valx; @@ -189,7 +189,7 @@ static void SetMTRRrange_D(u32 Base, u32 *pLimit, u32 *pMtrrAddr, u16 MtrrType) curBase = val; /* next Base = current Limit (loop exit)*/ addr++; /* next MTRR pair addr */ } - if(val < *pLimit) { + if (val < *pLimit) { *pLimit = val; addr = -1; } @@ -234,7 +234,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat addr = 0x200; lo = 0; hi = lo; - while( addr < 0x20C) { + while ( addr < 0x20C) { _WRMSR(addr, lo, hi); /* prog. MTRR with current region Mask */ addr++; /* next MTRR pair addr */ } @@ -244,7 +244,7 @@ void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat *======================================================================*/ printk(BIOS_DEBUG, "\t UMAMemTyping_D: Cache32bTOP:%x\n", Cache32bTOP); SetMTRRrangeWB_D(0, &Cache32bTOP, &addr); - if(addr == -1) /* ran out of MTRRs?*/ + if (addr == -1) /* ran out of MTRRs?*/ pMCTstat->GStatus |= 1<<GSB_MTRRshort; } } diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c index f550bdde72..9a769adbaf 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctndi_d.c @@ -62,7 +62,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, _SWHole = 0; } - if(!_SWHole) { + if (!_SWHole) { Base = Get_NB32(dev0, reg0); if (Base & 1) { NodesWmem++; @@ -81,7 +81,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, * are the same on all nodes */ DctSelBase = Get_NB32(pDCTstat->dev_dct, 0x114); - if(DctSelBase) { + if (DctSelBase) { DctSelBase <<= 8; if ( pDCTstat->Status & (1 << SB_HWHole)) { if (DctSelBase >= 0x1000000) { @@ -145,7 +145,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, Base = ((Nodes - 1) << 8) | 3; reg0 = 0x40; Node = 0; - while(Node < Nodes) { + while (Node < Nodes) { Set_NB32(dev0, reg0, Base); MemSize = MemSize0; MemSize--; @@ -159,7 +159,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, /* set base/limit to F1x120/124 per Node */ Node = 0; - while(Node < Nodes) { + while (Node < Nodes) { pDCTstat = pDCTstatA + Node; pDCTstat->NodeSysBase = 0; MemSize = MemSize0; @@ -180,7 +180,7 @@ void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, HoleBase = pMCTstat->HoleBase; if (Dct0MemSize >= HoleBase) { val = HWHoleSz; - if( Node == 0) { + if ( Node == 0) { val += Dct0MemSize; } } else { diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c index f97b8c64d3..324e35e980 100644 --- 
a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc.c @@ -76,7 +76,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat, p_A = (u32 *)SetupDqsPattern_1PassB(pass); p_B = (u32 *)SetupDqsPattern_1PassA(pass); - for(i=0;i<16;i++) { + for (i=0;i<16;i++) { buf_a[i] = p_A[i]; buf_b[i] = p_B[i]; } @@ -88,7 +88,7 @@ static void SetupRcvrPattern(struct MCTStatStruc *pMCTstat, void mct_TrainRcvrEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Pass) { - if(mct_checkNumberOfDqsRcvEn_1Pass(Pass)) { + if (mct_checkNumberOfDqsRcvEn_1Pass(Pass)) { if (is_fam15h()) dqsTrainRcvrEn_SW_Fam15(pMCTstat, pDCTstat, Pass); else @@ -560,7 +560,7 @@ static uint32_t convert_testaddr_and_channel_to_address(struct DCTStatStruc *pDC SetUpperFSbase(testaddr); testaddr <<= 8; - if((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) { + if ((pDCTstat->Status & (1<<SB_128bitmode)) && channel ) { testaddr += 8; /* second channel */ } @@ -614,7 +614,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, dev = pDCTstat->dev_dct; ch_start = 0; - if(!pDCTstat->GangedMode) { + if (!pDCTstat->GangedMode) { ch_end = 2; } else { ch_end = 1; @@ -636,7 +636,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, } cr4 = read_cr4(); - if(cr4 & ( 1 << 9)) { /* save the old value */ + if (cr4 & ( 1 << 9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1 << 9); /* OSFXSR enable SSE2 */ @@ -644,7 +644,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, msr = rdmsr(HWCR); /* FIXME: Why use SSEDIS */ - if(msr.lo & (1 << 17)) { /* save the old value */ + if (msr.lo & (1 << 17)) { /* save the old value */ _Wrap32Dis = 1; } msr.lo |= (1 << 17); /* HWCR.wrap32dis */ @@ -729,9 +729,9 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, TestAddr0B = TestAddr0 + (BigPagex8_RJ8 << 3); - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, Receiver+1)) { TestAddr1 = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, Channel, Receiver+1, &valid); - if(!valid) { /* Address not supported on current CS */ + if (!valid) { /* Address not supported on current CS */ continue; } TestAddr1B = TestAddr1 + (BigPagex8_RJ8 << 3); @@ -972,7 +972,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, ResetDCTWrPtr_D(dev, Channel, index_reg, Addl_Index); } - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC); } @@ -981,12 +981,12 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, mct_DisableDQSRcvEn_D(pDCTstat); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { msr = rdmsr(HWCR); msr.lo &= ~(1<<17); /* restore HWCR.wrap32dis */ wrmsr(HWCR, msr); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR */ write_cr4(cr4); @@ -996,7 +996,7 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, { u8 ChannelDTD; printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n"); - for(ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { printk(BIOS_DEBUG, "Channel:%x: %x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]); } @@ -1011,9 +1011,9 @@ static void dqsTrainRcvrEn_SW_Fam10(struct MCTStatStruc *pMCTstat, u16 *p; printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n"); - for(ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) { printk(BIOS_DEBUG, 
"Channel:%x\n", ChannelDTD); - for(ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) { + for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) { printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD); p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1]; for (i=0;i<8; i++) { @@ -1246,7 +1246,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, } cr4 = read_cr4(); - if(cr4 & ( 1 << 9)) { /* save the old value */ + if (cr4 & ( 1 << 9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1 << 9); /* OSFXSR enable SSE2 */ @@ -1255,7 +1255,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, msr = HWCR; _RDMSR(msr, &lo, &hi); /* FIXME: Why use SSEDIS */ - if(lo & (1 << 17)) { /* save the old value */ + if (lo & (1 << 17)) { /* save the old value */ _Wrap32Dis = 1; } lo |= (1 << 17); /* HWCR.wrap32dis */ @@ -1485,7 +1485,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, /* Calculate and program MaxRdLatency */ Calc_SetMaxRdLatency_D_Fam15(pMCTstat, pDCTstat, Channel, 0); - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC); } @@ -1494,13 +1494,13 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, mct_DisableDQSRcvEn_D(pDCTstat); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { msr = HWCR; _RDMSR(msr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ _WRMSR(msr, lo, hi); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR */ write_cr4(cr4); @@ -1510,7 +1510,7 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, { u8 ChannelDTD; printk(BIOS_DEBUG, "TrainRcvrEn: CH_MaxRdLat:\n"); - for(ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { printk(BIOS_DEBUG, "Channel:%x: %x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]); } @@ -1525,9 +1525,9 @@ static void dqsTrainRcvrEn_SW_Fam15(struct MCTStatStruc *pMCTstat, u16 *p; printk(BIOS_DEBUG, "TrainRcvrEn: CH_D_B_RCVRDLY:\n"); - for(ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD < 2; ChannelDTD++) { printk(BIOS_DEBUG, "Channel:%x\n", ChannelDTD); - for(ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) { + for (ReceiverDTD = 0; ReceiverDTD<8; ReceiverDTD+=2) { printk(BIOS_DEBUG, "\t\tReceiver:%x:", ReceiverDTD); p = pDCTstat->CH_D_B_RCVRDLY[ChannelDTD][ReceiverDTD>>1]; for (i=0;i<8; i++) { @@ -1604,7 +1604,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat, ch_end = 2; cr4 = read_cr4(); - if(cr4 & ( 1 << 9)) { /* save the old value */ + if (cr4 & ( 1 << 9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1 << 9); /* OSFXSR enable SSE2 */ @@ -1613,7 +1613,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat, msr = HWCR; _RDMSR(msr, &lo, &hi); /* FIXME: Why use SSEDIS */ - if(lo & (1 << 17)) { /* save the old value */ + if (lo & (1 << 17)) { /* save the old value */ _Wrap32Dis = 1; } lo |= (1 << 17); /* HWCR.wrap32dis */ @@ -1710,17 +1710,17 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat, write_max_read_latency_to_registers(pMCTstat, pDCTstat, Channel, pDCTstat->CH_MaxRdLat[Channel]); } - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, pDCTstat, _DisableDramECC); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { msr = HWCR; _RDMSR(msr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ _WRMSR(msr, lo, hi); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR 
*/ write_cr4(cr4); @@ -1730,7 +1730,7 @@ static void dqsTrainMaxRdLatency_SW_Fam15(struct MCTStatStruc *pMCTstat, { u8 ChannelDTD; printk(BIOS_DEBUG, "TrainMaxRdLatency: CH_MaxRdLat:\n"); - for(ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { printk(BIOS_DEBUG, "Channel:%x: %x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]); } @@ -1794,14 +1794,14 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u16 RcvrEnDly, u16 *p; u32 val; - if(RcvrEnDly == 0x1fe) { + if (RcvrEnDly == 0x1fe) { /*set the boundary flag */ pDCTstat->Status |= 1 << SB_DQSRcvLimit; } /* DimmOffset not needed for CH_D_B_RCVRDLY array */ - for(i=0; i < 8; i++) { - if(FinalValue) { + for (i=0; i < 8; i++) { + if (FinalValue) { /*calculate dimm offset */ p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1]; RcvrEnDly = p[i]; @@ -1812,7 +1812,7 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u16 RcvrEnDly, index = Table_DQSRcvEn_Offset[i >> 1]; index += Addl_Index; /* DIMMx DqsRcvEn byte0 */ val = Get_NB32_index_wait_DCT(dev, Channel, index_reg, index); - if(i & 1) { + if (i & 1) { /* odd byte lane */ val &= ~(0x1ff << 16); val |= ((RcvrEnDly & 0x1ff) << 16); @@ -1865,7 +1865,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D cpu_val_p = 11; } - if(pDCTstat->GangedMode) + if (pDCTstat->GangedMode) Channel = 0; dev = pDCTstat->dev_dct; @@ -1879,7 +1879,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D * add 1 MEMCLK to the sub-total. */ val = Get_NB32_DCT(dev, Channel, 0x90); - if(!(val & (1 << UnBuffDimm))) + if (!(val & (1 << UnBuffDimm))) SubTotal += 2; /* If the address prelaunch is setup for 1/2 MEMCLKs then @@ -1887,7 +1887,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D * if (AddrCmdSetup || CsOdtSetup || CkeSetup) then K := K + 2; */ val = Get_NB32_index_wait_DCT(dev, Channel, index_reg, 0x04); - if(!(val & 0x00202020)) + if (!(val & 0x00202020)) SubTotal += 1; else SubTotal += 2; @@ -1925,7 +1925,7 @@ static void mct_SetMaxLatency_D(struct DCTStatStruc *pDCTstat, u8 Channel, u16 D SubTotal += (cpu_val_n) / 2; pDCTstat->CH_MaxRdLat[Channel][0] = SubTotal; - if(pDCTstat->GangedMode) { + if (pDCTstat->GangedMode) { pDCTstat->CH_MaxRdLat[1][0] = SubTotal; } @@ -1950,7 +1950,7 @@ static void mct_InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, * Read Position is 1/2 Memclock Delay */ u8 i; - for(i=0;i<2; i++){ + for (i=0;i<2; i++){ InitDQSPos4RcvrEn_D(pMCTstat, pDCTstat, i); } } @@ -1972,8 +1972,8 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, /* FIXME: add Cx support */ dword = 0x00000000; - for(i=1; i<=3; i++) { - for(j=0; j<dn; j++) + for (i=1; i<=3; i++) { + for (j=0; j<dn; j++) /* DIMM0 Write Data Timing Low */ /* DIMM0 Write ECC Timing */ Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword); @@ -1981,14 +1981,14 @@ static void InitDQSPos4RcvrEn_D(struct MCTStatStruc *pMCTstat, /* errata #180 */ dword = 0x2f2f2f2f; - for(i=5; i<=6; i++) { - for(j=0; j<dn; j++) + for (i=5; i<=6; i++) { + for (j=0; j<dn; j++) /* DIMM0 Read DQS Timing Control Low */ Set_NB32_index_wait_DCT(dev, Channel, index_reg, i + 0x100 * j, dword); } dword = 0x0000002f; - for(j=0; j<dn; j++) + for (j=0; j<dn; j++) /* DIMM0 Read DQS ECC Timing Control */ Set_NB32_index_wait_DCT(dev, Channel, index_reg, 7 + 0x100 * j, dword); } @@ -2007,7 +2007,7 @@ void SetEccDQSRcvrEn_D(struct DCTStatStruc *pDCTstat, u8 Channel) index = 0x12; p = 
pDCTstat->CH_D_BC_RCVRDLY[Channel]; print_debug_dqs("\t\tSetEccDQSRcvrPos: Channel ", Channel, 2); - for(ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { + for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { val = p[ChipSel>>1]; Set_NB32_index_wait_DCT(dev, Channel, index_reg, index, val); print_debug_dqs_pair("\t\tSetEccDQSRcvrPos: ChipSel ", @@ -2029,7 +2029,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, EccDQSScale = pDCTstat->CH_EccDQSScale[Channel]; for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel += 2) { - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, ChipSel)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel, ChipSel)) { u16 *p; p = pDCTstat->CH_D_B_RCVRDLY[Channel][ChipSel>>1]; @@ -2049,7 +2049,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, * 2nd most like ECC byte lane */ val1 = p[(EccDQSLike>>8) & 0x07]; - if(val0 > val1) { + if (val0 > val1) { val = val0 - val1; } else { val = val1 - val0; @@ -2058,7 +2058,7 @@ static void CalcEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, val *= ~EccDQSScale; val >>= 8; /* /256 */ - if(val0 > val1) { + if (val0 > val1) { val -= val1; } else { val += val0; @@ -2087,7 +2087,7 @@ void mctSetEccDQSRcvrEn_D(struct MCTStatStruc *pMCTstat, if (!pDCTstat->NodePresent) break; if (pDCTstat->DCTSysLimit) { - for(i=0; i<2; i++) + for (i=0; i<2; i++) CalcEccDQSRcvrEn_D(pMCTstat, pDCTstat, i); } } diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c index 15e66c934c..d5357355cd 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc1p.c @@ -49,7 +49,7 @@ static u16 mct_Average_RcvrEnDly_1Pass(struct DCTStatStruc *pDCTstat, u8 Channel MaxValue = 0; p = pDCTstat->CH_D_B_RCVRDLY[Channel][Receiver >> 1]; - for(i=0; i < 8; i++) { + for (i=0; i < 8; i++) { /* get left value from DCTStatStruc.CHA_D0_B0_RCVRDLY*/ val = p[i]; /* get right value from DCTStatStruc.CHA_D0_B0_RCVRDLY_1*/ @@ -69,7 +69,7 @@ u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass) { u8 ret; ret = 0; - if((pDCTstat->DqsRcvEn_Pass == 0xff) && (pass== FirstPass)) + if ((pDCTstat->DqsRcvEn_Pass == 0xff) && (pass== FirstPass)) ret = 2; return ret; } diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c index c386fce087..2f4d4da82b 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mctsrc2p.c @@ -23,7 +23,7 @@ u8 mct_checkNumberOfDqsRcvEn_Pass(u8 pass) u32 SetupDqsPattern_PassA(u8 Pass) { u32 ret; - if(Pass == FirstPass) + if (Pass == FirstPass) ret = (u32) TestPattern1_D; else ret = (u32) TestPattern2_D; @@ -34,7 +34,7 @@ u32 SetupDqsPattern_PassA(u8 Pass) u32 SetupDqsPattern_PassB(u8 Pass) { u32 ret; - if(Pass == FirstPass) + if (Pass == FirstPass) ret = (u32) TestPattern0_D; else ret = (u32) TestPattern2_D; @@ -61,7 +61,7 @@ u8 mct_Get_Start_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, for ( i=0;i<bn; i++) { val = p[i]; - if(val > max) { + if (val > max) { max = val; } } @@ -91,7 +91,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, /* FIXME: which byte? 
*/ p_1 = pDCTstat->B_RCVRDLY_1; /* p_1 = pDCTstat->CH_D_B_RCVRDLY_1[Channel][Receiver>>1]; */ - for(i=0; i<bn; i++) { + for (i=0; i<bn; i++) { val = p[i]; /* left edge */ if (val != (RcvrEnDlyLimit - 1)) { @@ -111,7 +111,7 @@ u16 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, pDCTstat->DimmTrainFail &= ~(1<<(Receiver + Channel)); } } else { - for(i=0; i < bn; i++) { + for (i=0; i < bn; i++) { val = p[i]; /* Add 1/2 Memlock delay */ /* val += Pass1MemClkDly; */ diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c index 8cda2f8db0..15eb67e15b 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mcttmrl.c @@ -77,7 +77,7 @@ static u32 SetupMaxRdPattern(struct MCTStatStruc *pMCTstat, buf = (u32 *)(((u32)buffer + 0x10) & (0xfffffff0)); - for(i = 0; i < (16 * 3); i++) { + for (i = 0; i < (16 * 3); i++) { buf[i] = TestMaxRdLAtPattern_D[i]; } @@ -89,14 +89,14 @@ void TrainMaxReadLatency_D(struct MCTStatStruc *pMCTstat, { u8 Node; - for(Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { + for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) { struct DCTStatStruc *pDCTstat; pDCTstat = pDCTstatA + Node; - if(!pDCTstat->NodePresent) + if (!pDCTstat->NodePresent) break; - if(pDCTstat->DCTSysLimit) + if (pDCTstat->DCTSysLimit) maxRdLatencyTrain_D(pMCTstat, pDCTstat); } } @@ -119,7 +119,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, u32 pattern_buf; cr4 = read_cr4(); - if(cr4 & (1<<9)) { /* save the old value */ + if (cr4 & (1<<9)) { /* save the old value */ _SSE2 = 1; } cr4 |= (1<<9); /* OSFXSR enable SSE2 */ @@ -127,7 +127,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, addr = HWCR; _RDMSR(addr, &lo, &hi); - if(lo & (1<<17)) { /* save the old value */ + if (lo & (1<<17)) { /* save the old value */ _Wrap32Dis = 1; } lo |= (1<<17); /* HWCR.wrap32dis */ @@ -144,11 +144,11 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, print_debug_dqs("\tMaxRdLatencyTrain51: Channel ",Channel, 1); pDCTstat->Channel = Channel; - if( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel) + if ( (pDCTstat->Status & (1 << SB_128bitmode)) && Channel) break; /*if ganged mode, skip DCT 1 */ TestAddr0 = GetMaxRdLatTestAddr_D(pMCTstat, pDCTstat, Channel, &RcvrEnDly, &valid); - if(!valid) /* Address not supported on current CS */ + if (!valid) /* Address not supported on current CS */ continue; /* rank 1 of DIMM, testpattern 0 */ WriteMaxRdLat1CLTestPattern_D(pattern_buf, TestAddr0); @@ -156,10 +156,10 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, MaxRdLatDly = mct_GetStartMaxRdLat_D(pMCTstat, pDCTstat, Channel, RcvrEnDly, &Margin); print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly start ", MaxRdLatDly, 2); print_debug_dqs("\tMaxRdLatencyTrain52: MaxRdLatDly Margin ", Margin, 2); - while(MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */ + while (MaxRdLatDly < MAX_RD_LAT) { /* sweep Delay value here */ mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly); ReadMaxRdLat1CLTestPattern_D(TestAddr0); - if( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS) + if ( CompareMaxRdLatTestPattern_D(pattern_buf, TestAddr0) == DQS_PASS) break; SetTargetWTIO_D(TestAddr0); FlushMaxRdLatTestPattern_D(TestAddr0); @@ -170,17 +170,17 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, mct_setMaxRdLatTrnVal_D(pDCTstat, Channel, MaxRdLatDly + Margin); } - if(_DisableDramECC) { + if (_DisableDramECC) { mct_EnableDimmEccEn_D(pMCTstat, 
pDCTstat, _DisableDramECC); } - if(!_Wrap32Dis) { + if (!_Wrap32Dis) { addr = HWCR; _RDMSR(addr, &lo, &hi); lo &= ~(1<<17); /* restore HWCR.wrap32dis */ _WRMSR(addr, lo, hi); } - if(!_SSE2){ + if (!_SSE2){ cr4 = read_cr4(); cr4 &= ~(1<<9); /* restore cr4.OSFXSR */ write_cr4(cr4); @@ -190,7 +190,7 @@ static void maxRdLatencyTrain_D(struct MCTStatStruc *pMCTstat, { u8 ChannelDTD; printk(BIOS_DEBUG, "maxRdLatencyTrain: CH_MaxRdLat:\n"); - for(ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { + for (ChannelDTD = 0; ChannelDTD<2; ChannelDTD++) { printk(BIOS_DEBUG, "Channel: %02x: %02x\n", ChannelDTD, pDCTstat->CH_MaxRdLat[ChannelDTD][0]); } } @@ -245,7 +245,7 @@ static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr) print_debug_dqs_pair("\t\t\t\t\t\ttest_buf = ", (u32)test_buf, " value = ", val_test, 5); print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ", addr_lo, " value = ", val, 5); - if(val != val_test) { + if (val != val_test) { ret = DQS_FAIL; break; } @@ -273,7 +273,7 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, bn = 8; - if(pDCTstat->Status & (1 << SB_128bitmode)) { + if (pDCTstat->Status & (1 << SB_128bitmode)) { ch_start = 0; ch_end = 2; } else { @@ -283,12 +283,12 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, *valid = 0; - for(ch = ch_start; ch < ch_end; ch++) { - for(d=0; d<4; d++) { - for(Byte = 0; Byte<bn; Byte++) { + for (ch = ch_start; ch < ch_end; ch++) { + for (d=0; d<4; d++) { + for (Byte = 0; Byte<bn; Byte++) { u8 tmp; tmp = pDCTstat->CH_D_B_RCVRDLY[ch][d][Byte]; - if(tmp>Max) { + if (tmp>Max) { Max = tmp; Channel_Max = Channel; d_Max = d; @@ -297,11 +297,11 @@ static u32 GetMaxRdLatTestAddr_D(struct MCTStatStruc *pMCTstat, } } - if(mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1)) { + if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1)) { TestAddr0 = mct_GetMCTSysAddr_D(pMCTstat, pDCTstat, Channel_Max, d_Max << 1, valid); } - if(*valid) + if (*valid) *MaxRcvrEnDly = Max; return TestAddr0; @@ -318,7 +318,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat, u32 index_reg; u32 dev; - if(pDCTstat->GangedMode) + if (pDCTstat->GangedMode) Channel = 0; index_reg = 0x98; @@ -331,14 +331,14 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat, /* If registered DIMMs are being used then add 1 MEMCLK to the sub-total*/ val = Get_NB32_DCT(dev, Channel, 0x90); - if(!(val & (1 << UnBuffDimm))) + if (!(val & (1 << UnBuffDimm))) SubTotal += 2; /*If the address prelaunch is setup for 1/2 MEMCLKs then add 1, * else add 2 to the sub-total. if (AddrCmdSetup || CsOdtSetup * || CkeSetup) then K := K + 2; */ val = Get_NB32_index_wait_DCT(dev, Channel, index_reg, 0x04); - if(!(val & 0x00202020)) + if (!(val & 0x00202020)) SubTotal += 1; else SubTotal += 2; diff --git a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c index ffc6fb2df0..5c30bc554c 100644 --- a/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c +++ b/src/northbridge/amd/amdmct/mct_ddr3/mhwlc_d.c @@ -1382,12 +1382,12 @@ void setWLByteDelay(struct DCTStatStruc *pDCTstat, uint8_t dct, u8 ByteLane, u8 * - Program seed gross delay as 2 (gross is 4 or 6) or 1 (gross is 5). * - Keep original seed gross delay for later reference. */ - if(grossDelayValue >= 3) + if (grossDelayValue >= 3) grossDelayValue = (grossDelayValue&1)? 
1 : 2; fineDelayValue = pDCTData->WLFineDelay[index+ByteLane]; if (ByteLane < 4) ValueLow |= ((grossDelayValue << 5) | fineDelayValue) << 8*ByteLane; - else if(ByteLane < 8) + else if (ByteLane < 8) ValueHigh |= ((grossDelayValue << 5) | fineDelayValue) << 8*(ByteLane-4); else EccValue = ((grossDelayValue << 5) | fineDelayValue); @@ -1505,7 +1505,7 @@ void getWLByteDelay(struct DCTStatStruc *pDCTstat, uint8_t dct, u8 ByteLane, u8 if (pDCTData->WLGrossDelay[index+ByteLane] >= 3) { gross += pDCTData->WLGrossDelay[index+ByteLane]; - if(pDCTData->WLGrossDelay[index+ByteLane] & 1) + if (pDCTData->WLGrossDelay[index+ByteLane] & 1) gross -= 1; else gross -= 2; diff --git a/src/northbridge/amd/amdmct/wrappers/mcti_d.c b/src/northbridge/amd/amdmct/wrappers/mcti_d.c index 12115c5aaf..0ba2d33a7f 100644 --- a/src/northbridge/amd/amdmct/wrappers/mcti_d.c +++ b/src/northbridge/amd/amdmct/wrappers/mcti_d.c @@ -492,7 +492,7 @@ static void vErratum372(struct DCTStatStruc *pDCTstat) static void vErratum414(struct DCTStatStruc *pDCTstat) { int dct=0; - for(; dct < 2 ; dct++) + for (; dct < 2 ; dct++) { int dRAMConfigHi = Get_NB32(pDCTstat->dev_dct,0x94 + (0x100 * dct)); int powerDown = dRAMConfigHi & (1 << PowerDownEn ); diff --git a/src/northbridge/amd/cimx/rd890/NbPlatform.h b/src/northbridge/amd/cimx/rd890/NbPlatform.h index bcd5dda8f9..9e75cb6e65 100644 --- a/src/northbridge/amd/cimx/rd890/NbPlatform.h +++ b/src/northbridge/amd/cimx/rd890/NbPlatform.h @@ -44,13 +44,13 @@ #define ASSERT CIMX_ASSERT #endif #ifdef CIMX_TRACE_SUPPORT - #define CIMX_ASSERT(x) if(!(x)) {\ + #define CIMX_ASSERT(x) if (!(x)) {\ LibAmdTraceDebug (CIMX_TRACE_ALL, (CHAR8 *)"ASSERT !!! "__FILE__" - line %d\n", __LINE__); \ /*__asm {jmp $}; */\ } //#define IDS_HDT_CONSOLE(s, args...) do_printk(BIOS_DEBUG, s, ##args) #else - #define CIMX_ASSERT(x) if(!(x)) {\ + #define CIMX_ASSERT(x) if (!(x)) {\ /*__asm {jmp $}; */\ } #endif diff --git a/src/northbridge/amd/cimx/rd890/late.c b/src/northbridge/amd/cimx/rd890/late.c index bacadc5b6f..c10428dd33 100644 --- a/src/northbridge/amd/cimx/rd890/late.c +++ b/src/northbridge/amd/cimx/rd890/late.c @@ -78,7 +78,7 @@ static void rd890_enable(device_t dev) 0, (devfn >> 3), (devfn & 0x07), dev->enabled); /* we only do this once */ - if(devfn==0) { + if (devfn==0) { /* CIMX configuration defualt initialize */ rd890_cimx_config(&gConfig, &nb_cfg[0], &ht_cfg[0], &pcie_cfg[0]); if (gConfig.StandardHeader.CalloutPtr != NULL) { diff --git a/src/northbridge/amd/gx2/northbridge.c b/src/northbridge/amd/gx2/northbridge.c index eb57d7e1e8..5c49f1add3 100644 --- a/src/northbridge/amd/gx2/northbridge.c +++ b/src/northbridge/amd/gx2/northbridge.c @@ -227,7 +227,7 @@ static void northbridge_set_resources(struct device *dev) struct bus *bus; - for(bus = dev->link_list; bus; bus = bus->next) { + for (bus = dev->link_list; bus; bus = bus->next) { if (bus->children) { printk(BIOS_DEBUG, "my_dev_set_resources: assign_resources %d\n", bus->secondary); diff --git a/src/northbridge/amd/pi/00630F01/northbridge.c b/src/northbridge/amd/pi/00630F01/northbridge.c index 2809f2f0e2..c55cefb359 100644 --- a/src/northbridge/amd/pi/00630F01/northbridge.c +++ b/src/northbridge/amd/pi/00630F01/northbridge.c @@ -146,7 +146,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -335,7 +335,7 @@ static void read_resources(device_t dev) * It is not honored by the 
coreboot resource allocator if it is in * the CPU_CLUSTER. */ - if(IS_ENABLED(CONFIG_MMCONF_SUPPORT)) + if (IS_ENABLED(CONFIG_MMCONF_SUPPORT)) enable_mmconf_resource(dev); } @@ -403,7 +403,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate )) break; } @@ -849,7 +849,7 @@ static void domain_set_resources(device_t dev) else set_top_of_ram(ramtop); - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1030,7 +1030,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/pi/00660F01/northbridge.c b/src/northbridge/amd/pi/00660F01/northbridge.c index e4af5f02a6..1caecf9850 100644 --- a/src/northbridge/amd/pi/00660F01/northbridge.c +++ b/src/northbridge/amd/pi/00660F01/northbridge.c @@ -149,7 +149,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -330,7 +330,7 @@ static void read_resources(device_t dev) * It is not honored by the coreboot resource allocator if it is in * the CPU_CLUSTER. */ - if(IS_ENABLED(CONFIG_MMCONF_SUPPORT)) + if (IS_ENABLED(CONFIG_MMCONF_SUPPORT)) enable_mmconf_resource(dev); } @@ -396,7 +396,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -855,7 +855,7 @@ static void domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1033,7 +1033,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/pi/00730F01/northbridge.c b/src/northbridge/amd/pi/00730F01/northbridge.c index 9b1c1b1691..6f7f993046 100644 --- a/src/northbridge/amd/pi/00730F01/northbridge.c +++ b/src/northbridge/amd/pi/00730F01/northbridge.c @@ -156,7 +156,7 @@ static void f1_write_config32(unsigned reg, u32 value) int i; if (fx_devs == 0) get_fx_devs(); - for(i = 0; i < fx_devs; i++) { + for (i = 0; i < fx_devs; i++) { device_t dev; dev = __f1_dev[i]; if (dev && dev->enabled) { @@ -349,7 +349,7 @@ static void read_resources(device_t dev) * It is not honored by the coreboot resource allocator if it is in * the CPU_CLUSTER. 
*/ - if(IS_ENABLED(CONFIG_MMCONF_SUPPORT)) + if (IS_ENABLED(CONFIG_MMCONF_SUPPORT)) enable_mmconf_resource(dev); } @@ -417,7 +417,7 @@ static void create_vga_resource(device_t dev, unsigned nodeid) printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary, link->secondary,link->subordinate); /* We need to make sure the vga_pri is under the link */ - if((vga_pri->bus->secondary >= link->secondary ) && + if ((vga_pri->bus->secondary >= link->secondary ) && (vga_pri->bus->secondary <= link->subordinate ) ) #endif @@ -879,7 +879,7 @@ static void domain_set_resources(device_t dev) set_top_of_ram(ramtop); #endif - for(link = dev->link_list; link; link = link->next) { + for (link = dev->link_list; link; link = link->next) { if (link->children) { assign_resources(link); } @@ -1060,7 +1060,7 @@ static void cpu_bus_scan(device_t dev) * ensure all of the cpu's pci devices are found. */ int fn; - for(fn = 0; fn <= 5; fn++) { //FBDIMM? + for (fn = 0; fn <= 5; fn++) { //FBDIMM? cdb_dev = pci_probe_dev(NULL, pbus, PCI_DEVFN(devn, fn)); } diff --git a/src/northbridge/amd/pi/def_callouts.c b/src/northbridge/amd/pi/def_callouts.c index 52c4fd5ebb..61b5c8bea1 100644 --- a/src/northbridge/amd/pi/def_callouts.c +++ b/src/northbridge/amd/pi/def_callouts.c @@ -32,7 +32,7 @@ AGESA_STATUS GetBiosCallout (UINT32 Func, UINT32 Data, VOID *ConfigPtr) if (BiosCallouts[i].CalloutName == Func) break; } - if(i >= BiosCalloutsLen) + if (i >= BiosCalloutsLen) return AGESA_UNSUPPORTED; return BiosCallouts[i].CalloutPtr (Func, Data, ConfigPtr); |
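For readers unfamiliar with the style rule applied throughout this patch: coreboot expects a single space between a control-flow keyword (if, for, while, switch) and its opening parenthesis, while function and macro invocations keep the parenthesis flush against the name (this is the kind of whitespace issue that tools such as checkpatch.pl flag). The short C sketch below is a hypothetical illustration of that convention only; none of the identifiers in it come from the patch.

/* Hypothetical illustration of the spacing convention enforced by this
 * patch; not part of the coreboot sources. */
#include <stdio.h>

static int count_even(int limit)
{
	int i, count = 0;

	for (i = 0; i < limit; i++) {	/* keyword, space, then '(' */
		if ((i % 2) == 0)	/* keyword, space, then '(' */
			count++;
	}

	while (count > 4)		/* keyword, space, then '(' */
		count--;

	return count;
}

int main(void)
{
	/* Function calls keep '(' directly after the name. */
	printf("count_even(10) = %d\n", count_even(10));
	return 0;
}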