Diffstat (limited to 'src/cpu/amd')
-rw-r--r--  src/cpu/amd/car/cache_as_ram_post.c    |  1
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_init.c | 74
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_msr.h  |  1
-rw-r--r--  src/cpu/amd/mtrr/amd_mtrr.c            | 20
4 files changed, 64 insertions(+), 32 deletions(-)
diff --git a/src/cpu/amd/car/cache_as_ram_post.c b/src/cpu/amd/car/cache_as_ram_post.c
index 66ca9fdf96..6a129b258a 100644
--- a/src/cpu/amd/car/cache_as_ram_post.c
+++ b/src/cpu/amd/car/cache_as_ram_post.c
@@ -1,4 +1,5 @@
 /* by yhlu 6.2005 */
+/* be warned, this file will be used other cores and core0/node0 */
         __asm__ volatile (
         /* FIXME : backup stack in CACHE_AS_RAM into mmx and sse and after we get STACK up, we restore that.
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index b75025b4c8..3c526e78ca 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -192,6 +192,7 @@ static void init_ecc_memory(unsigned node_id)
         /* If ecc support is not enabled don't touch memory */
         dcl = pci_read_config32(f2_dev, DRAM_CONFIG_LOW);
         if (!(dcl & DCL_DimmEccEn)) {
+                printk_debug("ECC Disabled\n");
                 return;
         }
 
@@ -226,7 +227,9 @@ static void init_ecc_memory(unsigned node_id)
         disable_lapic();
 
         /* Walk through 2M chunks and zero them */
-        for(basek = begink; basek < endk; basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1))) {
+        for(basek = begink; basek < endk;
+                basek = ((basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1)))
+        {
                 unsigned long limitk;
                 unsigned long size;
                 void *addr;
@@ -255,12 +258,13 @@ static void init_ecc_memory(unsigned node_id)
                 }
                 size = (limitk - basek) << 10;
                 addr = map_2M_page(basek >> 11);
-                addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
                 if (addr == MAPPING_ERROR) {
+                        printk_err("Cannot map page: %x\n", basek >> 11);
                         continue;
                 }
 
                 /* clear memory 2M (limitk - basek) */
+                addr = (void *)(((uint32_t)addr) | ((basek & 0x7ff) << 10));
                 clear_memory(addr, size);
         }
         /* Restore the normal state */
@@ -319,19 +323,16 @@ static inline void k8_errata(void)
                 }
                 wrmsr(NB_CFG_MSR, msr);
         }
-// AMD_D0_SUPPORT
+
+        /* Erratum 97 ... */
         if (!is_cpu_pre_c0() && is_cpu_pre_d0()) {
-                /* D0 later don't need it */
-                /* Erratum 97 ... */
                 msr = rdmsr_amd(DC_CFG_MSR);
                 msr.lo |= 1 << 3;
                 wrmsr_amd(DC_CFG_MSR, msr);
-        }
-
-//AMD_D0_SUPPORT
-        if(is_cpu_pre_d0()) {
-                /*D0 later don't need it */
-                /* Erratum 94 ... */
+        }
+
+        /* Erratum 94 ... */
+        if (is_cpu_pre_d0()) {
                 msr = rdmsr_amd(IC_CFG_MSR);
                 msr.lo |= 1 << 11;
                 wrmsr_amd(IC_CFG_MSR, msr);
@@ -339,37 +340,51 @@ static inline void k8_errata(void)
 
         /* Erratum 91 prefetch miss is handled in the kernel */
 
-//AMD_D0_SUPPORT
+        /* Erratum 106 ... */
+        msr = rdmsr_amd(LS_CFG_MSR);
+        msr.lo |= 1 << 25;
+        wrmsr_amd(LS_CFG_MSR, msr);
+
+        /* Erratum 107 ... */
+        msr = rdmsr_amd(BU_CFG_MSR);
+        msr.hi |= 1 << (43 - 32);
+        wrmsr_amd(BU_CFG_MSR, msr);
+
         if(is_cpu_d0()) {
                 /* Erratum 110 ...*/
                 msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
                 msr.hi |=1;
                 wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
-        }
+        }
 
-//AMD_E0_SUPPORT
-        if(!is_cpu_pre_e0()) {
-                /* Erratum 110 ...*/
+        if (!is_cpu_pre_e0()) {
+                /* Erratum 110 ... */
                 msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
                 msr.hi |=1;
                 wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
         }
+
+        /* Erratum 122 */
+        msr = rdmsr(HWCR_MSR);
+        msr.lo |= 1 << 6;
+        wrmsr(HWCR_MSR, msr);
+
 }
 
 void model_fxx_init(device_t dev)
 {
         unsigned long i;
         msr_t msr;
-#if CONFIG_LOGICAL_CPUS==1
+#if CONFIG_LOGICAL_CPUS
         struct node_core_id id;
-        unsigned siblings;
+        unsigned siblings;
         id.coreid=0;
 #else
         unsigned nodeid;
 #endif
 
         /* Turn on caching if we haven't already */
-        x86_enable_cache();
+        x86_enable_cache();
         amd_setup_mtrrs();
         x86_mtrr_check();
@@ -386,11 +401,12 @@ void model_fxx_init(device_t dev)
 
         enable_cache();
 
-#if CONFIG_LOGICAL_CPUS==1
-//AMD_DUAL_CORE_SUPPORT
+        /* Enable the local cpu apics */
+        setup_lapic();
+
+#if CONFIG_LOGICAL_CPUS == 1
         siblings = cpuid_ecx(0x80000008) & 0xff;
-//      id = get_node_core_id((!is_cpu_pre_e0())? read_nb_cfg_54():0);
         id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
 
         if(siblings>0) {
@@ -407,24 +423,24 @@ void model_fxx_init(device_t dev)
                 wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
         }
 
+
         /* Is this a bad location? In particular can another node prefecth
          * data from this node before we have initialized it?
          */
-        if(id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core0
+        if (id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core 0
 #else
-        /* For now there is a 1-1 mapping between node_id and cpu_id */
-        nodeid = lapicid() & 0x7;
+        /* Is this a bad location? In particular can another node prefecth
+         * data from this node before we have initialized it?
+         */
+        nodeid = lapicid() & 0xf;
         init_ecc_memory(nodeid);
 #endif
-
-        /* Enable the local cpu apics */
-        setup_lapic();
 
 #if CONFIG_LOGICAL_CPUS==1
-//AMD_DUAL_CORE_SUPPORT
         /* Start up my cpu siblings */
 //      if(id.coreid==0)  amd_sibling_init(dev); // Don't need core1 is already be put in the CPU BUS in bus_cpu_scan
 #endif
+
 }
 
 static struct device_operations cpu_dev_ops = {
diff --git a/src/cpu/amd/model_fxx/model_fxx_msr.h b/src/cpu/amd/model_fxx/model_fxx_msr.h
index c8d57bee1a..b4795cbbb2 100644
--- a/src/cpu/amd/model_fxx/model_fxx_msr.h
+++ b/src/cpu/amd/model_fxx/model_fxx_msr.h
@@ -3,6 +3,7 @@
 
 #define HWCR_MSR                0xC0010015
 #define NB_CFG_MSR              0xC001001f
+#define LS_CFG_MSR              0xC0011020
 #define IC_CFG_MSR              0xC0011021
 #define DC_CFG_MSR              0xC0011022
 #define BU_CFG_MSR              0xC0011023
diff --git a/src/cpu/amd/mtrr/amd_mtrr.c b/src/cpu/amd/mtrr/amd_mtrr.c
index de4ed988c2..e57bb3bec7 100644
--- a/src/cpu/amd/mtrr/amd_mtrr.c
+++ b/src/cpu/amd/mtrr/amd_mtrr.c
@@ -96,26 +96,32 @@ static void set_fixed_mtrr_resource(void *gp, struct device *dev, struct resourc
                 return;
         }
         printk_debug("Setting fixed MTRRs(%d-%d) Type: WB, RdMEM, WrMEM\n",
-                start_mtrr, last_mtrr);
+                start_mtrr, last_mtrr);
         set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK | MTRR_READ_MEM | MTRR_WRITE_MEM);
 
 }
 
+extern void enable_fixed_mtrr(void);
+
 void amd_setup_mtrrs(void)
 {
+        unsigned long address_bits;
         struct mem_state state;
         unsigned long i;
         msr_t msr;
 
+        /* Enable the access to AMD RdDram and WrDram extension bits */
+        disable_cache();
         msr = rdmsr(SYSCFG_MSR);
         msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
         wrmsr(SYSCFG_MSR, msr);
+        enable_cache();
 
         printk_debug("\n");
         /* Initialized the fixed_mtrrs to uncached */
         printk_debug("Setting fixed MTRRs(%d-%d) type: UC\n",
-                0, NUM_FIXED_RANGES);
+                0, NUM_FIXED_RANGES);
         set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
 
         /* Except for the PCI MMIO hole just before 4GB there are no
@@ -127,6 +133,7 @@ void amd_setup_mtrrs(void)
                 IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
                 set_fixed_mtrr_resource, &state);
         printk_debug("DONE fixed MTRRs\n");
+
         if (state.mmio_basek > state.tomk) {
                 state.mmio_basek = state.tomk;
         }
@@ -164,10 +171,17 @@ void amd_setup_mtrrs(void)
         msr.lo &= ~SYSCFG_MSR_MtrrFixDramModEn;
         wrmsr(SYSCFG_MSR, msr);
 
+        enable_fixed_mtrr();
+        enable_cache();
+
+        /* FIXME we should probably query the cpu for this
+         * but so far this is all any recent AMD cpu has supported.
+         */
+        address_bits = 40;
+
         /* Now that I have mapped what is memory and what is not
          * Setup the mtrrs so we can cache the memory.
          */
-        x86_setup_mtrrs();
+        x86_setup_var_mtrrs(address_bits);
 }