Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/k8/apic_timer.c  |  26
-rw-r--r--  src/cpu/k8/cpufixup.c    |  34
-rw-r--r--  src/cpu/k8/earlymtrr.c   |  87
-rw-r--r--  src/cpu/p5/cpuid.c       |  13
-rw-r--r--  src/cpu/p6/boot_cpu.c    |  12
-rw-r--r--  src/cpu/p6/mtrr.c        |  72
6 files changed, 190 insertions, 54 deletions
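
The change common to every file below is the move from the old two-argument rdmsr()/wrmsr() macros to an msr_t struct interface. The headers themselves are not part of this diff; as a rough sketch, <cpu/p6/msr.h> presumably provides something like the following (the struct layout follows from how msr.lo/msr.hi are used below, and the asm constraints are the standard ones for the rdmsr/wrmsr instructions):

    typedef struct msr_struct {
        unsigned lo;    /* low 32 bits (EAX) */
        unsigned hi;    /* high 32 bits (EDX) */
    } msr_t;

    static inline msr_t rdmsr(unsigned index)
    {
        msr_t result;
        /* rdmsr returns the MSR selected by ECX in EDX:EAX */
        asm volatile ("rdmsr"
            : "=a" (result.lo), "=d" (result.hi)
            : "c" (index));
        return result;
    }

    static inline void wrmsr(unsigned index, msr_t msr)
    {
        /* wrmsr writes EDX:EAX to the MSR selected by ECX */
        asm volatile ("wrmsr"
            : /* no outputs */
            : "c" (index), "a" (msr.lo), "d" (msr.hi));
    }
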
diff --git a/src/cpu/k8/apic_timer.c b/src/cpu/k8/apic_timer.c
new file mode 100644
index 0000000000..fa7e9b905f
--- /dev/null
+++ b/src/cpu/k8/apic_timer.c
@@ -0,0 +1,26 @@
+#include <stdint.h>
+#include <delay.h>
+#include <cpu/p6/msr.h>
+#include <cpu/p6/apic.h>
+
+void init_timer(void)
+{
+ /* Set the apic timer to no interrupts and periodic mode */
+ apic_write(APIC_LVTT, (1 << 17) | (1 << 16) | (0 << 12) | (0 << 0));
+ /* Set the divider to 1, no divider */
+ apic_write(APIC_TDCR, APIC_TDR_DIV_1);
+ /* Set the initial counter to 0xffffffff */
+ apic_write(APIC_TMICT, 0xffffffff);
+}
+
+void udelay(unsigned usecs)
+{
+ uint32_t start, value, ticks;
+ /* Calculate the number of ticks to run; our FSB runs at 200MHz */
+ ticks = usecs * 200;
+ start = apic_read(APIC_TMCCT);
+ do {
+ value = apic_read(APIC_TMCCT);
+ } while((start - value) < ticks);
+
+}
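
A note on the udelay() loop just added: the APIC timer counts down, so start - value is the number of ticks elapsed. Because the initial count is 0xffffffff, the periodic reload wraps over the full 32-bit range, and the unsigned subtraction still gives the right answer if the counter reloads mid-delay. A worked example (values are illustrative, not from the source):

    uint32_t start = 0x00000010;        /* read just before the counter reloads */
    uint32_t value = 0xfffffff0;        /* read 0x20 ticks later, after the reload */
    uint32_t elapsed = start - value;   /* 0x00000020 in 32-bit unsigned arithmetic */
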
diff --git a/src/cpu/k8/cpufixup.c b/src/cpu/k8/cpufixup.c
index 9f306d1156..8e7ad95d5d 100644
--- a/src/cpu/k8/cpufixup.c
+++ b/src/cpu/k8/cpufixup.c
@@ -13,7 +13,8 @@
void k8_cpufixup(struct mem_range *mem)
{
- unsigned long lo = 0, hi = 0, i;
+ msr_t msr;
+ unsigned long i;
unsigned long ram_megabytes;
/* For now no Athlon board has significant holes in it's
@@ -27,33 +28,34 @@ void k8_cpufixup(struct mem_range *mem)
ram_megabytes = (mem[i-1].basek + mem[i-1].sizek) *1024;
+#warning "FIXME handle > 4GB of ram"
// 8 MB alignment please
ram_megabytes += 0x7fffff;
ram_megabytes &= (~0x7fffff);
// set top_mem registers to ram size
printk_spew("Setting top_mem to 0x%x\n", ram_megabytes);
- rdmsr(TOP_MEM, lo, hi);
- printk_spew("TOPMEM was 0x%02x:0x%02x\n", hi, lo);
- hi = 0;
- lo = ram_megabytes;
- wrmsr(TOP_MEM, lo, hi);
+ msr = rdmsr(TOP_MEM);
+ printk_spew("TOPMEM was 0x%02x:0x%02x\n", msr.hi, msr.lo);
+ msr.hi = 0;
+ msr.lo = ram_megabytes;
+ wrmsr(TOP_MEM, msr);
// I am setting this even though I won't enable it
- wrmsr(TOP_MEM2, lo, hi);
+ wrmsr(TOP_MEM2, msr);
/* zero the IORR's before we enable to prevent
* undefined side effects
*/
- lo = hi = 0;
+ msr.lo = msr.hi = 0;
for (i = IORR_FIRST; i <= IORR_LAST; i++)
- wrmsr(i, lo, hi);
-
- rdmsr(SYSCFG, lo, hi);
- printk_spew("SYSCFG was 0x%x:0x%x\n", hi, lo);
- lo |= MTRRVARDRAMEN;
- wrmsr(SYSCFG, lo, hi);
- rdmsr(SYSCFG, lo, hi);
- printk_spew("SYSCFG IS NOW 0x%x:0x%x\n", hi, lo);
+ wrmsr(i, msr);
+
+ msr = rdmsr(SYSCFG);
+ printk_spew("SYSCFG was 0x%x:0x%x\n", msr.hi, msr.lo);
+ msr.lo |= MTRRVARDRAMEN;
+ wrmsr(SYSCFG, msr);
+ msr = rdmsr(SYSCFG);
+ printk_spew("SYSCFG IS NOW 0x%x:0x%x\n", msr.hi, msr.lo);
}
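
The alignment idiom in k8_cpufixup() above (add 0x7fffff, then mask with ~0x7fffff) satisfies the "8 MB alignment please" request by rounding the value up to the next 8 MiB boundary before it is written to TOP_MEM; note that the quantity being aligned is a byte count, despite the ram_megabytes name. A small illustrative example of the round-up:

    unsigned long bytes = 0x07f00001;   /* 127 MiB + 1 byte */
    bytes += 0x7fffff;                  /* 0x08700000 */
    bytes &= ~0x7fffff;                 /* 0x08000000 = 128 MiB, the next 8 MiB boundary */
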
diff --git a/src/cpu/k8/earlymtrr.c b/src/cpu/k8/earlymtrr.c
new file mode 100644
index 0000000000..47ddd12340
--- /dev/null
+++ b/src/cpu/k8/earlymtrr.c
@@ -0,0 +1,87 @@
+#include <cpu/k8/mtrr.h>
+
+/* The fixed and variable MTRRs power up with random values;
+ * clear them to MTRR_TYPE_UNCACHEABLE for safety.
+ */
+
+static void early_mtrr_init(void)
+{
+ static unsigned long mtrr_msrs[] = {
+ /* fixed mtrr */
+ 0x250, 0x258, 0x259,
+ 0x268, 0x269, 0x26A,
+ 0x26B, 0x26C, 0x26D,
+ 0x26E, 0x26F,
+ /* var mtrr */
+ 0x200, 0x201, 0x202, 0x203,
+ 0x204, 0x205, 0x206, 0x207,
+ 0x208, 0x209, 0x20A, 0x20B,
+ 0x20C, 0x20D, 0x20E, 0x20F,
+ /* var iorr msr */
+ 0xC0010016, 0xC0010017, 0xC0010018, 0xC0010019,
+ /* mem top */
+ 0xC001001A, 0xC001001D,
+ /* NULL end of table */
+ 0
+ };
+ msr_t msr;
+ unsigned long *msr_addr;
+
+ /* Initialize all of the relevant msrs to 0 */
+ msr.lo = 0;
+ msr.hi = 0;
+ for(msr_addr = mtrr_msrs; *msr_addr; msr_addr++) {
+ wrmsr(*msr_addr, msr);
+ }
+
+ /* Enable memory access for 0 - 128MB using top_mem */
+ msr.hi = 0;
+ msr.lo = 0x08000000;
+ wrmsr(TOP_MEM, msr);
+
+ /* Enable caching for 0 - 128MB using variable mtrr */
+ msr = rdmsr(0x200);
+ msr.hi &= 0xfffffff0;
+ msr.hi |= 0x00000000;
+ msr.lo &= 0x00000f00;
+ msr.lo |= 0x00000006;
+ wrmsr(0x200, msr);
+
+ msr = rdmsr(0x201);
+ msr.hi &= 0xfffffff0;
+ msr.hi |= 0x0000000f;
+ msr.lo &= 0x000007ff;
+ msr.lo |= 0xf0000800;
+ wrmsr(0x201, msr);
+
+#if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
+ /* enable write back caching so we can do execute in place
+ * on the flash rom.
+ */
+ msr.hi = 0x00000000;
+ msr.lo = XIP_ROM_BASE | 0x005;
+ wrmsr(0x202, msr);
+#error "FIXME verify the type of MTRR I have setup"
+ msr.hi = 0x0000000f;
+ msr.lo = ~(XIP_ROM_SIZE - 1) | 0x800;
+ wrmsr(0x203, msr);
+#endif
+
+ /* Set the default memory type to uncacheable and enable the
+ * variable MTRRs (the fixed MTRRs are left disabled here)
+ */
+ msr.hi = 0x00000000;
+ msr.lo = 0x00000800;
+ wrmsr(0x2ff, msr);
+
+ /* Enable the MTRRs in SYSCFG */
+ msr = rdmsr(SYSCFG_MSR);
+ msr.lo |= SYSCFG_MSR_MtrrVarDramEn;
+ wrmsr(SYSCFG_MSR, msr);
+
+ /* Enable the cache */
+ unsigned long cr0;
+ cr0 = read_cr0();
+ cr0 &= 0x9fffffff;
+ write_cr0(cr0);
+}
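
For reference, the raw base/mask pairs written to 0x200/0x201 above use the standard variable-MTRR encoding: MTRRphysBase carries the base address with the memory type in its low byte, and MTRRphysMask carries ~(size - 1) over the physical address space plus the valid bit (0x800). The same computation appears in intel_set_var_mtrr() in src/cpu/p6/mtrr.c further down; a hypothetical helper (not part of the patch, assumes a 36-bit physical address space and a power-of-two sizek) would look like:

    static msr_t mtrr_mask_for(unsigned long sizek)
    {
        msr_t mask;
        if (sizek < 4*1024*1024) {      /* range below 4 GiB */
            mask.hi = 0x0000000f;
            mask.lo = ~((sizek << 10) - 1) | 0x800;
        } else {                        /* range of 4 GiB or more */
            mask.hi = 0x0000000f & ~((sizek >> 22) - 1);
            mask.lo = 0x800;
        }
        return mask;    /* e.g. sizek = 128*1024 (128 MB) -> hi 0x0000000f, lo 0xf8000800 */
    }
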
diff --git a/src/cpu/p5/cpuid.c b/src/cpu/p5/cpuid.c
index d98ce13a8e..2d3d3a87b5 100644
--- a/src/cpu/p5/cpuid.c
+++ b/src/cpu/p5/cpuid.c
@@ -9,21 +9,20 @@ int mtrr_check(void)
{
#ifdef i686
/* Only Pentium Pro and later have MTRR */
- unsigned long low, high;
-
+ msr_t msr;
printk_debug("\nMTRR check\n");
- rdmsr(0x2ff, low, high);
- low = low >> 10;
+ msr = rdmsr(0x2ff);
+ msr.lo >>= 10;
printk_debug("Fixed MTRRs : ");
- if (low & 0x01)
+ if (msr.lo & 0x01)
printk_debug("Enabled\n");
else
printk_debug("Disabled\n");
printk_debug("Variable MTRRs: ");
- if (low & 0x02)
+ if (msr.lo & 0x02)
printk_debug("Enabled\n");
else
printk_debug("Disabled\n");
@@ -31,7 +30,7 @@ int mtrr_check(void)
printk_debug("\n");
post_code(0x93);
- return ((int) low);
+ return ((int) msr.lo);
#else /* !i686 */
return 0;
#endif /* i686 */
diff --git a/src/cpu/p6/boot_cpu.c b/src/cpu/p6/boot_cpu.c
new file mode 100644
index 0000000000..803eecdd5d
--- /dev/null
+++ b/src/cpu/p6/boot_cpu.c
@@ -0,0 +1,12 @@
+#include <cpu/p6/msr.h>
+
+int boot_cpu(void)
+{
+ /* Read the local APIC base MSR (0x1b); bit 8 is the BSP flag,
+ * set only on the bootstrap processor. */
+ int bsp;
+ msr_t msr;
+ msr = rdmsr(0x1b);
+ bsp = !!(msr.lo & (1 << 8));
+ return bsp;
+}
diff --git a/src/cpu/p6/mtrr.c b/src/cpu/p6/mtrr.c
index 413acb1883..b067883316 100644
--- a/src/cpu/p6/mtrr.c
+++ b/src/cpu/p6/mtrr.c
@@ -40,20 +40,20 @@ static unsigned int mtrr_msr[] = {
static void intel_enable_fixed_mtrr(void)
{
- unsigned long low, high;
+ msr_t msr;
- rdmsr(MTRRdefType_MSR, low, high);
- low |= 0xc00;
- wrmsr(MTRRdefType_MSR, low, high);
+ msr = rdmsr(MTRRdefType_MSR);
+ msr.lo |= 0xc00;
+ wrmsr(MTRRdefType_MSR, msr);
}
static void intel_enable_var_mtrr(void)
{
- unsigned long low, high;
+ msr_t msr;
- rdmsr(MTRRdefType_MSR, low, high);
- low |= 0x800;
- wrmsr(MTRRdefType_MSR, low, high);
+ msr = rdmsr(MTRRdefType_MSR);
+ msr.lo |= 0x800;
+ wrmsr(MTRRdefType_MSR, msr);
}
static inline void disable_cache(void)
@@ -86,19 +86,18 @@ static inline void enable_cache(void)
/* setting variable mtrr, comes from linux kernel source */
static void intel_set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, unsigned char type)
{
- unsigned long base_high, base_low;
- unsigned long mask_high, mask_low;
+ msr_t base, mask;
- base_high = basek >> 22;
- base_low = basek << 10;
+ base.hi = basek >> 22;
+ base.lo = basek << 10;
if (sizek < 4*1024*1024) {
- mask_high = 0x0F;
- mask_low = ~((sizek << 10) -1);
+ mask.hi = 0x0F;
+ mask.lo = ~((sizek << 10) -1);
}
else {
- mask_high = 0x0F & (~((sizek >> 22) -1));
- mask_low = 0;
+ mask.hi = 0x0F & (~((sizek >> 22) -1));
+ mask.lo = 0;
}
if (reg >= 8)
@@ -108,13 +107,17 @@ static void intel_set_var_mtrr(unsigned int reg, unsigned long basek, unsigned l
// do this.
disable_cache();
if (sizek == 0) {
+ msr_t zero;
+ zero.lo = zero.hi = 0;
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
- wrmsr (MTRRphysMask_MSR (reg), 0, 0);
+ wrmsr (MTRRphysMask_MSR(reg), zero);
} else {
/* Bit 32-35 of MTRRphysMask should be set to 1 */
- wrmsr (MTRRphysBase_MSR(reg), base_low | type, base_high);
- wrmsr (MTRRphysMask_MSR(reg), mask_low | 0x800, mask_high);
+ base.lo |= type;
+ mask.lo |= 0x800;
+ wrmsr (MTRRphysBase_MSR(reg), base);
+ wrmsr (MTRRphysMask_MSR(reg), mask);
}
enable_cache();
}
@@ -131,11 +134,18 @@ void set_var_mtrr(unsigned int reg, unsigned long base, unsigned long size, unsi
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
- wrmsr (MTRRphysMask_MSR (reg), 0, 0);
+ msr_t zero;
+ zero.lo = zero.hi = 0;
+ wrmsr (MTRRphysMask_MSR(reg), zero);
} else {
/* Bit 32-35 of MTRRphysMask should be set to 1 */
- wrmsr (MTRRphysBase_MSR (reg), base | type, 0);
- wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0x0F);
+ msr_t basem, maskm;
+ basem.lo = base | type;
+ basem.hi = 0;
+ maskm.lo = ~(size - 1) | 0x800;
+ maskm.hi = 0x0F;
+ wrmsr (MTRRphysBase_MSR(reg), basem);
+ wrmsr (MTRRphysMask_MSR(reg), maskm);
}
// turn cache back on.
@@ -197,32 +207,32 @@ static void set_fixed_mtrrs(unsigned int first, unsigned int last, unsigned char
{
unsigned int i;
unsigned int fixed_msr = NUM_FIXED_RANGES >> 3;
- unsigned long low, high;
- low = high = 0; /* Shut up gcc */
+ msr_t msr;
+ msr.lo = msr.hi = 0; /* Shut up gcc */
for(i = first; i < last; i++) {
/* When I switch to a new msr read it in */
if (fixed_msr != i >> 3) {
/* But first write out the old msr */
if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
disable_cache();
- wrmsr(mtrr_msr[fixed_msr], low, high);
+ wrmsr(mtrr_msr[fixed_msr], msr);
enable_cache();
}
fixed_msr = i>>3;
- rdmsr(mtrr_msr[fixed_msr], low, high);
+ msr = rdmsr(mtrr_msr[fixed_msr]);
}
if ((i & 7) < 4) {
- low &= ~(0xff << ((i&3)*8));
- low |= type << ((i&3)*8);
+ msr.lo &= ~(0xff << ((i&3)*8));
+ msr.lo |= type << ((i&3)*8);
} else {
- high &= ~(0xff << ((i&3)*8));
- high |= type << ((i&3)*8);
+ msr.hi &= ~(0xff << ((i&3)*8));
+ msr.hi |= type << ((i&3)*8);
}
}
/* Write out the final msr */
if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
disable_cache();
- wrmsr(mtrr_msr[fixed_msr], low, high);
+ wrmsr(mtrr_msr[fixed_msr], msr);
enable_cache();
}
}
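
Finally, the index arithmetic in set_fixed_mtrrs() packs one memory-type byte per fixed range: i >> 3 selects the MSR in mtrr_msr[], (i & 7) < 4 selects the low or high half, and (i & 3) * 8 is the bit position within that half. A worked example, assuming mtrr_msr[] lists the fixed-range MSRs in the usual order (MTRRfix64K_00000, MTRRfix16K_80000, MTRRfix16K_A0000, then the eight 4K registers):

    unsigned int i = 13;                /* fixed range number 13 */
    unsigned int fixed_msr = i >> 3;    /* 1 -> mtrr_msr[1], MTRRfix16K_80000 (0x258) */
    int high_half = (i & 7) >= 4;       /* true -> the type byte lives in msr.hi */
    unsigned int shift = (i & 3) * 8;   /* 8 -> bits 15:8 of msr.hi */
    /* Range 13 is therefore the sixth 16 KB range, i.e. 0x94000 - 0x97fff. */
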