Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/amd/car/cache_as_ram.inc        | 220
-rw-r--r--  src/cpu/amd/car/cache_as_ram.lds        |  17
-rw-r--r--  src/cpu/amd/car/cache_as_ram_post.c     |  94
-rw-r--r--  src/cpu/amd/car/copy_and_run.c          | 132
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_init.c  |   6
-rw-r--r--  src/cpu/x86/car/cache_as_ram.inc        | 192
-rw-r--r--  src/cpu/x86/car/cache_as_ram.lds        |  11
-rw-r--r--  src/cpu/x86/car/cache_as_ram_post.c     |  82
-rw-r--r--  src/cpu/x86/car/copy_and_run.c          | 132
-rw-r--r--  src/cpu/x86/lapic/boot_cpu.c            |   2
10 files changed, 884 insertions, 4 deletions
diff --git a/src/cpu/amd/car/cache_as_ram.inc b/src/cpu/amd/car/cache_as_ram.inc
new file mode 100644
index 0000000000..dfa2b03ac3
--- /dev/null
+++ b/src/cpu/amd/car/cache_as_ram.inc
@@ -0,0 +1,220 @@
+/* by yhlu 6.2005 */
+/* The cache-as-RAM size is set by DCACHE_RAM_SIZE */
+#define CacheSize DCACHE_RAM_SIZE
+#define CacheBase (0xd0000 - CacheSize)
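+/* CacheBase is chosen so the CAR region ends at 0xd0000; e.g. a 64K
+   DCACHE_RAM_SIZE gives a 0xc0000-0xcffff window, matching the fixed
+   MTRRs programmed below. */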
+
+#include <cpu/x86/mtrr.h>
+#include <cpu/amd/mtrr.h>
+
+ /* Save the BIST result */
+ movl %eax, %ebp
+
+CacheAsRam:
+ /* hopefully the normal image can skip redoing this setup */
+#if USE_FALLBACK_IMAGE == 1
+
+ /* Set MtrrFixDramModEn so the fixed MTRRs can be modified and cleared */
+ xorl %eax, %eax # clear %eax and %edx
+ xorl %edx, %edx
+
+enable_fixed_mtrr_dram_modify:
+ movl $SYSCFG_MSR, %ecx
+ rdmsr
+ andl $(~(SYSCFG_MSR_MtrrFixDramEn|SYSCFG_MSR_MtrrVarDramEn)), %eax
+ orl $SYSCFG_MSR_MtrrFixDramModEn, %eax
+ wrmsr
+
+ /* Set the default memory type and enable fixed and variable MTRRs */
+ movl $MTRRdefType_MSR, %ecx
+ xorl %edx, %edx
+ /* Enable Variable and Fixed MTRRs */
+ movl $0x00000c00, %eax
+ wrmsr
+
+ /* Clear all MTRRs */
+
+ xorl %edx, %edx
+ movl $fixed_mtrr_msr, %esi
+clear_fixed_var_mtrr:
+ lodsl (%esi), %eax
+ testl %eax, %eax
+ jz clear_fixed_var_mtrr_out
+
+ movl %eax, %ecx
+ xorl %eax, %eax
+ wrmsr
+
+ jmp clear_fixed_var_mtrr
+clear_fixed_var_mtrr_out:
+
+ /* Enable the MTRRs and IORRs in SYSCFG */
+ movl $SYSCFG_MSR, %ecx
+ rdmsr
+ orl $(SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn), %eax
+ wrmsr
+
+#if 1
+#if CacheSize == 0x10000
+ /* enable caching for 64K using fixed mtrr */
+ movl $0x268, %ecx /* fix4k_c0000*/
+ movl $0x06060606, %eax /* WB IO type */
+ movl %eax, %edx
+ wrmsr
+ movl $0x269, %ecx
+ wrmsr
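+ /* Each byte of 0x06060606 sets one 4K fixed-range field to type 06
+  * (write-back); MSRs 0x268 and 0x269 together cover 0xc0000-0xcffff. */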
+#endif
+
+#if CacheSize == 0x8000
+ /* enable caching for 32K using fixed mtrr */
+ movl $0x269, %ecx /* fix4k_c8000*/
+ movl $0x06060606, %eax /* WB IO type */
+ movl %eax, %edx
+ wrmsr
+#endif
+
+ /* enable caching for 16K/8K/4K using fixed mtrr */
+ movl $0x269, %ecx /* fix4k_cc000*/
+#if CacheSize == 0x4000
+ movl $0x06060606, %edx /* WB IO type */
+#endif
+#if CacheSize == 0x2000
+ movl $0x06060000, %edx /* WB IO type */
+#endif
+#if CacheSize == 0x1000
+ movl $0x06000000, %edx /* WB IO type */
+#endif
+ xorl %eax, %eax
+ wrmsr
+
+#else
+ /* enable caching for the CAR region using a variable mtrr */
+ movl $0x200, %ecx
+ xorl %edx, %edx
+ movl $(CacheBase | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x201, %ecx
+ movl $0x0000000f, %edx /* upper mask bits; AMD supports 40-bit addresses (0xff), 0xf covers 36 bits */
+ movl $((~((CacheBase + CacheSize) - 1)) | 0x800), %eax
+ wrmsr
+
+ /* make the region MMIO by leaving RdDram and WrDram clear */
+ movl $IORR0_BASE, %ecx
+ xorl %edx, %edx
+ movl $CacheBase, %eax /* bits 3 and 4 clear: reads/writes go to I/O, not DRAM */
+ wrmsr
+
+ movl $IORR0_MASK, %ecx
+ movl $0x000000ff, %edx
+ movl $(~((CacheBase + CacheSize) - 1) | 0x800), %eax
+ wrmsr
+#endif
+
+ /* enable memory access for 0 - 1MB using top_mem */
+ movl $TOP_MEM, %ecx
+ xorl %edx, %edx
+ movl $(((CONFIG_LB_MEM_TOPK << 10) + TOP_MEM_MASK) & ~TOP_MEM_MASK) , %eax
+ wrmsr
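+ /* The expression rounds CONFIG_LB_MEM_TOPK (in KB) up to the TOP_MEM
+  * register's alignment, enabling DRAM access below that top. */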
+#else
+ /* disable cache */
+ movl %cr0, %eax
+ orl $(0x1<<30),%eax
+ movl %eax, %cr0
+
+#endif /* USE_FALLBACK_IMAGE == 1*/
+
+#if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
+ /* enable write-back caching so we can execute in place
+ * on the flash rom.
+ */
+ movl $0x202, %ecx
+ xorl %edx, %edx
+ movl $(XIP_ROM_BASE | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x203, %ecx
+ movl $0x0000000f, %edx
+ movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
+ wrmsr
+#endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
+
+ /* enable cache */
+ movl %cr0, %eax
+ andl $0x9fffffff,%eax
+ movl %eax, %cr0
+
+#if USE_FALLBACK_IMAGE == 1
+
+
+ /* Read the range with lodsl */
+ movl $(CacheBase+CacheSize-4), %esi
+ std
+ movl $(CacheSize>>2), %ecx
+ rep lodsl
+ /* Clear the range */
+ movl $(CacheBase+CacheSize-4), %edi
+ movl $(CacheSize>>2), %ecx
+ xorl %eax, %eax
+ rep stosl
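+ /* The lodsl pass above allocated cache lines for the whole region;
+  * zeroing them with stosl initializes the cache-as-RAM contents. */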
+
+#if 0
+ /* check the cache as ram */
+ movl $CacheBase, %esi
+ movl $(CacheSize>>2), %ecx
+.xin1:
+ movl %esi, %eax
+ movl %eax, (%esi)
+ movl $0x1000, %edx
+ movb %ah, %al
+.testx1:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx1
+
+ movl (%esi), %eax
+ cmpb $0xff, %al
+ je .xin2 /* don't show */
+ movl $0x1000, %edx
+.testx2:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx2
+
+.xin2: decl %ecx
+ je .xout1
+ add $4, %esi
+ jmp .xin1
+.xout1:
+
+#endif
+#endif /*USE_FALLBACK_IMAGE == 1*/
+
+
+ movl $(CacheBase+CacheSize-4), %eax
+ movl %eax, %esp
+
+
+ /* Restore the BIST result */
+ movl %ebp, %eax
+ /* give amd64_main a valid frame pointer */
+ movl %esp, %ebp
+ pushl %eax /* bist */
+ call amd64_main
+ /* We will not go back */
+
+fixed_mtrr_msr:
+ .long 0x250, 0x258, 0x259
+ .long 0x268, 0x269, 0x26A
+ .long 0x26B, 0x26C, 0x26D
+ .long 0x26E, 0x26F
+var_mtrr_msr:
+ .long 0x200, 0x201, 0x202, 0x203
+ .long 0x204, 0x205, 0x206, 0x207
+ .long 0x208, 0x209, 0x20A, 0x20B
+ .long 0x20C, 0x20D, 0x20E, 0x20F
+var_iorr_msr:
+ .long 0xC0010016, 0xC0010017, 0xC0010018, 0xC0010019
+mem_top:
+ .long 0xC001001A, 0xC001001D
+ .long 0x000 /* NULL, end of table */
+.CacheAsRam_out:
diff --git a/src/cpu/amd/car/cache_as_ram.lds b/src/cpu/amd/car/cache_as_ram.lds
new file mode 100644
index 0000000000..5cca0bb283
--- /dev/null
+++ b/src/cpu/amd/car/cache_as_ram.lds
@@ -0,0 +1,17 @@
+/*
+ * init sections to place code running with cache as ram.
+ *
+ * 2004 by Stefan Reinauer <stepan@openbios.org>
+ */
+
+SECTIONS {
+ .init . : {
+ _init = .;
+ *(.init.text);
+ *(.init.rodata);
+ *(.init.rodata.*);
+ . = ALIGN(16);
+ _einit = .;
+ }
+
+}
diff --git a/src/cpu/amd/car/cache_as_ram_post.c b/src/cpu/amd/car/cache_as_ram_post.c
new file mode 100644
index 0000000000..66ca9fdf96
--- /dev/null
+++ b/src/cpu/amd/car/cache_as_ram_post.c
@@ -0,0 +1,94 @@
+/* by yhlu 6.2005 */
+ __asm__ volatile (
+ /*
+ FIXME: back up the CAR stack into MMX/SSE registers and restore it once
+ the real RAM stack is up. Only needed if we ever want to return here.
+ */
+
+ /* We don't need cache as ram from now on */
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* Clear the fixed MTRRs that covered the CAR region */
+ "movl $0x269, %ecx\n\t" /* fix4k_c8000*/
+ "xorl %edx, %edx\n\t"
+ "xorl %eax, %eax\n\t"
+ "wrmsr\n\t"
+#if DCACHE_RAM_SIZE > 0x8000
+ "movl $0x268, %ecx\n\t" /* fix4k_c0000*/
+ "wrmsr\n\t"
+#endif
+
+ /* Disable the fixed MTRRs from now on; linuxbios_ram will re-enable them */
+ "movl $0xC0010010, %ecx\n\t"
+// "movl $SYSCFG_MSR, %ecx\n\t"
+ "rdmsr\n\t"
+ "andl $(~(3<<18)), %eax\n\t"
+// "andl $(~(SYSCFG_MSR_MtrrFixDramModEn | SYSCFG_MSR_MtrrFixDramEn)), %eax\n\t"
+ "wrmsr\n\t"
+
+ /* Set the default memory type and disable fixed and enable variable MTRRs */
+ "movl $0x2ff, %ecx\n\t"
+// "movl $MTRRdefType_MSR, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ /* Enable Variable and Disable Fixed MTRRs */
+ "movl $0x00000800, %eax\n\t"
+ "wrmsr\n\t"
+
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* map the first 1M write-combining (type 1) for fast clearing */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 1), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t"
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+#endif
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* clear the first 1M */
+ "movl $0x0, %edi\n\t"
+ "cld\n\t"
+ "movl $(0x100000>>2), %ecx\n\t"
+ "xorl %eax, %eax\n\t"
+ "rep stosl\n\t"
+
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* enable caching for first 1M using variable mtrr */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 6), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t"
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+ "invd\n\t"
+
+ /*
+ FIXME: we hope %esp and %ebp need not change here, so the values saved in
+ MMX/SSE could be restored; but that range is now I/O-typed, so don't go back.
+ */
+#endif
+ );
diff --git a/src/cpu/amd/car/copy_and_run.c b/src/cpu/amd/car/copy_and_run.c
new file mode 100644
index 0000000000..89a864d4fc
--- /dev/null
+++ b/src/cpu/amd/car/copy_and_run.c
@@ -0,0 +1,132 @@
+/* by yhlu 6.2005
+ moved from nrv2b.c and some lines from crt0.S
+*/
+#ifndef ENDIAN
+#define ENDIAN 0
+#endif
+#ifndef BITSIZE
+#define BITSIZE 32
+#endif
+
+#define GETBIT_8(bb, src, ilen) \
+ (((bb = bb & 0x7f ? bb*2 : ((unsigned)src[ilen++]*2+1)) >> 8) & 1)
+
+#define GETBIT_LE16(bb, src, ilen) \
+ (bb*=2,bb&0xffff ? (bb>>16)&1 : (ilen+=2,((bb=(src[ilen-2]+src[ilen-1]*256u)*2+1)>>16)&1))
+
+#define GETBIT_LE32(bb, src, ilen) \
+ (bc > 0 ? ((bb>>--bc)&1) : (bc=31,\
+ bb=*(const uint32_t *)((src)+ilen),ilen+=4,(bb>>31)&1))
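+/* bb is the bit buffer; bc counts the bits still valid in it (LE32 variant
+   only). The 8- and 16-bit variants track position with a sentinel bit in bb. */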
+
+#if ENDIAN == 0 && BITSIZE == 8
+#define GETBIT(bb, src, ilen) GETBIT_8(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 16
+#define GETBIT(bb, src, ilen) GETBIT_LE16(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 32
+#define GETBIT(bb, src, ilen) GETBIT_LE32(bb, src, ilen)
+#endif
+
+static void copy_and_run(unsigned cpu_reset)
+{
+ uint8_t *src, *dst;
+ unsigned long dst_len;
+ unsigned long ilen = 0, olen = 0, last_m_off = 1;
+ uint32_t bb = 0;
+ unsigned bc = 0;
+
+ print_debug("Copying LinuxBIOS to ram.\r\n");
+
+#if !CONFIG_COMPRESS
+ __asm__ volatile (
+ "leal _liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ "leal _eiseg, %2\n\t"
+ "subl %1, %2\n\t"
+ : "=a" (src), "=b" (dst), "=c" (dst_len)
+ );
+ memcpy(dst, src, dst_len); /* dst = _iseg (RAM), src = _liseg (ROM copy) */
+#else
+
+ __asm__ volatile (
+ "leal 4+_liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ : "=a" (src) , "=b" (dst)
+ );
+
+#if CONFIG_USE_INIT
+ printk_debug("src=%08x\r\n",src);
+ printk_debug("dst=%08x\r\n",dst);
+#else
+ print_debug("src="); print_debug_hex32(src); print_debug("\r\n");
+ print_debug("dst="); print_debug_hex32(dst); print_debug("\r\n");
+#endif
+
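+ /* NRV2B decoding: a 1 bit copies one literal byte; a 0 bit starts a match
+    whose offset and length are read as gamma codes by the loops below. */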
+ for(;;) {
+ unsigned int m_off, m_len;
+ while(GETBIT(bb, src, ilen)) {
+ dst[olen++] = src[ilen++];
+ }
+ m_off = 1;
+ do {
+ m_off = m_off*2 + GETBIT(bb, src, ilen);
+ } while (!GETBIT(bb, src, ilen));
+ if (m_off == 2)
+ {
+ m_off = last_m_off;
+ }
+ else
+ {
+ m_off = (m_off - 3)*256 + src[ilen++];
+ if(m_off == 0xffffffffU)
+ break;
+ last_m_off = ++m_off;
+ }
+ m_len = GETBIT(bb, src, ilen);
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ if (m_len == 0)
+ {
+ m_len++;
+ do {
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ } while(!GETBIT(bb, src, ilen));
+ m_len += 2;
+ }
+ m_len += (m_off > 0xd00);
+ {
+ const uint8_t *m_pos;
+ m_pos = dst + olen - m_off;
+ dst[olen++] = *m_pos++;
+ do {
+ dst[olen++] = *m_pos++;
+ } while(--m_len > 0);
+ }
+ }
+#endif
+// dump_mem(dst, dst+0x100);
+#if CONFIG_USE_INIT
+ printk_debug("linxbios_ram.bin length = %08x\r\n", olen);
+#else
+ print_debug("linxbios_ram.bin length = "); print_debug_hex32(olen); print_debug("\r\n");
+#endif
+ print_debug("Jumping to LinuxBIOS.\r\n");
+
+ if(cpu_reset == 1 ) {
+ __asm__ volatile (
+ "movl $0xffffffff, %ebp\n\t"
+ );
+ }
+ else {
+ __asm__ volatile (
+ "xorl %ebp, %ebp\n\t"
+ );
+ }
+
+ __asm__ volatile (
+ "cli\n\t"
+ "leal _iseg, %edi\n\t"
+ "jmp %edi\n\t"
+ );
+
+}
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index fdaf05cb66..b75025b4c8 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -453,12 +453,12 @@ static struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_AMD, 0x10f80 }, /* CH7-D0 */
{ X86_VENDOR_AMD, 0x10fb0 },
//AMD_E0_SUPPORT
- { X86_VENDOR_AMD, 0x20f50 }, /* SH7-E0*/
+ { X86_VENDOR_AMD, 0x20f50 }, /* SH8-E0*/
{ X86_VENDOR_AMD, 0x20f40 },
{ X86_VENDOR_AMD, 0x20f70 },
- { X86_VENDOR_AMD, 0x20fc0 }, /* DH7-E0 */ /* DH-E3 */
+ { X86_VENDOR_AMD, 0x20fc0 }, /* DH8-E0 */ /* DH-E3 */
{ X86_VENDOR_AMD, 0x20ff0 },
- { X86_VENDOR_AMD, 0x20f10 }, /* JH7-E0 */
+ { X86_VENDOR_AMD, 0x20f10 }, /* JH8-E1 */
{ X86_VENDOR_AMD, 0x20f30 },
{ X86_VENDOR_AMD, 0x20f51 }, /* SH-E4 */
{ X86_VENDOR_AMD, 0x20f71 },
diff --git a/src/cpu/x86/car/cache_as_ram.inc b/src/cpu/x86/car/cache_as_ram.inc
new file mode 100644
index 0000000000..d610efbb3b
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram.inc
@@ -0,0 +1,192 @@
+/* The cache-as-RAM size is set by DCACHE_RAM_SIZE */
+#define CacheSize DCACHE_RAM_SIZE
+#define CacheBase DCACHE_RAM_BASE
+
+#include <cpu/x86/mtrr.h>
+
+ /* Save the BIST result */
+ movl %eax, %ebp
+
+CacheAsRam:
+ /* hopefully the normal image can skip redoing this setup */
+#if USE_FALLBACK_IMAGE == 1
+
+ /* Clear all MTRRs */
+
+ xorl %edx, %edx
+ movl $fixed_mtrr_msr, %esi
+clear_fixed_var_mtrr:
+ lodsl (%esi), %eax
+ testl %eax, %eax
+ jz clear_fixed_var_mtrr_out
+
+ movl %eax, %ecx
+ xorl %eax, %eax
+ wrmsr
+
+ jmp clear_fixed_var_mtrr
+clear_fixed_var_mtrr_out:
+
+ /* enable caching for the CAR region using a variable mtrr */
+ movl $0x200, %ecx
+ xorl %edx, %edx
+ movl $(CacheBase | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x201, %ecx
+ movl $0x0000000f, %edx
+ movl $((~((CacheBase + CacheSize) - 1)) | 0x800), %eax
+ wrmsr
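+ /* The mask is ~(CacheSize - 1) with bit 11 (valid) set; %edx carries the
+  * physical-address bits above 32. */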
+
+ /* Set the default memory type and enable variable MTRRs */
+ movl $MTRRdefType_MSR, %ecx
+ xorl %edx, %edx
+ /* Enable Variable MTRRs */
+ movl $0x00000800, %eax
+ wrmsr
+
+ /* Disable fast string operation */
+ movl $0x1a0, %ecx
+ rdmsr
+ andl $(~0x1), %eax
+ wrmsr
+#else
+ /* disable cache */
+ movl %cr0, %eax
+ orl $(0x1<<30),%eax
+ movl %eax, %cr0
+
+#endif /* USE_FALLBACK_IMAGE == 1*/
+
+#if 0
+#if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
+ /* enable write-back caching so we can execute in place
+ * on the flash rom.
+ */
+ movl $0x202, %ecx
+ xorl %edx, %edx
+ movl $(XIP_ROM_BASE | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x203, %ecx
+ movl $0x0000000f, %edx
+ movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
+ wrmsr
+#endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
+#endif
+
+ /* enable cache */
+ movl %cr0, %eax
+ andl $0x9fffffff,%eax
+ movl %eax, %cr0
+
+#if USE_FALLBACK_IMAGE == 1
+
+// intel_chip_post_macro(0x11) /* post 11 */
+
+ /* Read the range with lodsl */
+ movl $CacheBase, %esi
+ cld
+ movl $(CacheSize>>2), %ecx
+ rep lodsl
+
+ // Disable the cache. This is the trick: Pentium Pro and later
+ // processors still respond to cache hits with CD=1 and NW=1.
+ // That is, read hits access the cache and write hits update it.
+ // With the tags established above and no snoop hits, the cache
+ // behaves as RAM.
+ movl %cr0, %eax
+ orl $0x60000000, %eax
+ movl %eax, %cr0
+
+ /* Clear the range */
+ movl $CacheBase, %edi
+ cld
+ movl $(CacheSize>>2), %ecx
+ xorl %eax, %eax
+ rep stosl
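+ /* With CD=1/NW=1 these writes hit the tagged lines and stay in cache,
+  * turning the region into usable RAM. */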
+
+
+#if 1
+ /* check the cache as ram */
+ movl $CacheBase, %esi
+ movl $(CacheSize>>2), %ecx
+.xin1:
+ movl %esi, %eax
+ movl %eax, (%esi)
+ decl %ecx
+ je .xout1
+ add $4, %esi
+ jmp .xin1
+.xout1:
+
+ movl $CacheBase, %esi
+// movl $(CacheSize>>2), %ecx
+ movl $4, %ecx
+.xin1x:
+ movl %esi, %eax
+
+ movl $0x4000, %edx
+ movb %ah, %al
+.testx1:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx1
+
+ movl (%esi), %eax
+ cmpb $0xff, %al
+ je .xin2 /* don't show */
+
+ movl $0x4000, %edx
+.testx2:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx2
+
+.xin2: decl %ecx
+ je .xout1x
+ add $4, %esi
+ jmp .xin1x
+.xout1x:
+
+#endif
+#endif /*USE_FALLBACK_IMAGE == 1*/
+
+// intel_chip_post_macro(0x12) /* post 12 */
+
+ movl $(CacheBase+CacheSize-4), %eax
+ movl %eax, %esp
+
+ /* Load a different set of data segments */
+#if CONFIG_USE_INIT
+ movw $CACHE_RAM_DATA_SEG, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+#endif
+
+lout:
+// intel_chip_post_macro(0x13) /* post 13 */
+
+ /* Restore the BIST result */
+ movl %ebp, %eax
+ /* give amd64_main a valid frame pointer */
+ movl %esp, %ebp
+ pushl %eax /* bist */
+ call amd64_main
+ /* We will not go back */
+
+
+fixed_mtrr_msr:
+ .long 0x250, 0x258, 0x259
+ .long 0x268, 0x269, 0x26A
+ .long 0x26B, 0x26C, 0x26D
+ .long 0x26E, 0x26F
+var_mtrr_msr:
+ .long 0x200, 0x201, 0x202, 0x203
+ .long 0x204, 0x205, 0x206, 0x207
+ .long 0x208, 0x209, 0x20A, 0x20B
+ .long 0x20C, 0x20D, 0x20E, 0x20F
+ .long 0x000 /* NULL, end of table */
+.CacheAsRam_out:
diff --git a/src/cpu/x86/car/cache_as_ram.lds b/src/cpu/x86/car/cache_as_ram.lds
new file mode 100644
index 0000000000..7f1b373dba
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram.lds
@@ -0,0 +1,11 @@
+SECTIONS {
+ .init . : {
+ _init = .;
+ *(.init.text);
+ *(.init.rodata);
+ *(.init.rodata.*);
+ . = ALIGN(16);
+ _einit = .;
+ }
+
+}
diff --git a/src/cpu/x86/car/cache_as_ram_post.c b/src/cpu/x86/car/cache_as_ram_post.c
new file mode 100644
index 0000000000..8cbc1e9cba
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram_post.c
@@ -0,0 +1,82 @@
+
+ __asm__ volatile (
+ /*
+ FIXME: back up the CAR stack into MMX/SSE registers and restore it once
+ the real RAM stack is up. Only needed if we ever want to return here.
+ */
+
+ /* We don't need cache as ram from now on */
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* Clear the variable MTRR that covered the CAR region */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "xorl %eax, %eax\n\t"
+ "wrmsr\n\t"
+ "movl $0x201, %ecx\n\t"
+ "wrmsr\n\t"
+
+ /* enable fast string operation */
+ "movl $0x1a0, %ecx\n\t"
+ "rdmsr\n\t"
+ "orl $1, %eax\n\t"
+ "wrmsr\n\t"
+
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* map the first 1M write-combining (type 1) for fast clearing */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 1), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+#endif
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* clear the first 1M */
+ "movl $0x0, %edi\n\t"
+ "cld\n\t"
+ "movl $(0x100000>>2), %ecx\n\t"
+ "xorl %eax, %eax\n\t"
+ "rep stosl\n\t"
+
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* enable caching for first 1M using variable mtrr */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 6), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+ "invd\n\t"
+
+ /*
+ FIXME: we hope %esp and %ebp need not change here, so the values saved in
+ MMX/SSE could be restored; but that range is now I/O-typed, so don't go back.
+ */
+#endif
+ );
diff --git a/src/cpu/x86/car/copy_and_run.c b/src/cpu/x86/car/copy_and_run.c
new file mode 100644
index 0000000000..89a864d4fc
--- /dev/null
+++ b/src/cpu/x86/car/copy_and_run.c
@@ -0,0 +1,132 @@
+/* by yhlu 6.2005
+ moved from nrv2b.c and some lines from crt0.S
+*/
+#ifndef ENDIAN
+#define ENDIAN 0
+#endif
+#ifndef BITSIZE
+#define BITSIZE 32
+#endif
+
+#define GETBIT_8(bb, src, ilen) \
+ (((bb = bb & 0x7f ? bb*2 : ((unsigned)src[ilen++]*2+1)) >> 8) & 1)
+
+#define GETBIT_LE16(bb, src, ilen) \
+ (bb*=2,bb&0xffff ? (bb>>16)&1 : (ilen+=2,((bb=(src[ilen-2]+src[ilen-1]*256u)*2+1)>>16)&1))
+
+#define GETBIT_LE32(bb, src, ilen) \
+ (bc > 0 ? ((bb>>--bc)&1) : (bc=31,\
+ bb=*(const uint32_t *)((src)+ilen),ilen+=4,(bb>>31)&1))
+
+#if ENDIAN == 0 && BITSIZE == 8
+#define GETBIT(bb, src, ilen) GETBIT_8(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 16
+#define GETBIT(bb, src, ilen) GETBIT_LE16(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 32
+#define GETBIT(bb, src, ilen) GETBIT_LE32(bb, src, ilen)
+#endif
+
+static void copy_and_run(unsigned cpu_reset)
+{
+ uint8_t *src, *dst;
+ unsigned long dst_len;
+ unsigned long ilen = 0, olen = 0, last_m_off = 1;
+ uint32_t bb = 0;
+ unsigned bc = 0;
+
+ print_debug("Copying LinuxBIOS to ram.\r\n");
+
+#if !CONFIG_COMPRESS
+ __asm__ volatile (
+ "leal _liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ "leal _eiseg, %2\n\t"
+ "subl %1, %2\n\t"
+ : "=a" (src), "=b" (dst), "=c" (dst_len)
+ );
+ memcpy(dst, src, dst_len); /* dst = _iseg (RAM), src = _liseg (ROM copy) */
+#else
+
+ __asm__ volatile (
+ "leal 4+_liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ : "=a" (src) , "=b" (dst)
+ );
+
+#if CONFIG_USE_INIT
+ printk_debug("src=%08x\r\n",src);
+ printk_debug("dst=%08x\r\n",dst);
+#else
+ print_debug("src="); print_debug_hex32(src); print_debug("\r\n");
+ print_debug("dst="); print_debug_hex32(dst); print_debug("\r\n");
+#endif
+
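+ /* Same NRV2B decoding as in the AMD copy_and_run.c above: 1 bits emit
+    literals, 0 bits start gamma-coded matches. */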
+ for(;;) {
+ unsigned int m_off, m_len;
+ while(GETBIT(bb, src, ilen)) {
+ dst[olen++] = src[ilen++];
+ }
+ m_off = 1;
+ do {
+ m_off = m_off*2 + GETBIT(bb, src, ilen);
+ } while (!GETBIT(bb, src, ilen));
+ if (m_off == 2)
+ {
+ m_off = last_m_off;
+ }
+ else
+ {
+ m_off = (m_off - 3)*256 + src[ilen++];
+ if(m_off == 0xffffffffU)
+ break;
+ last_m_off = ++m_off;
+ }
+ m_len = GETBIT(bb, src, ilen);
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ if (m_len == 0)
+ {
+ m_len++;
+ do {
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ } while(!GETBIT(bb, src, ilen));
+ m_len += 2;
+ }
+ m_len += (m_off > 0xd00);
+ {
+ const uint8_t *m_pos;
+ m_pos = dst + olen - m_off;
+ dst[olen++] = *m_pos++;
+ do {
+ dst[olen++] = *m_pos++;
+ } while(--m_len > 0);
+ }
+ }
+#endif
+// dump_mem(dst, dst+0x100);
+#if CONFIG_USE_INIT
+ printk_debug("linxbios_ram.bin length = %08x\r\n", olen);
+#else
+ print_debug("linxbios_ram.bin length = "); print_debug_hex32(olen); print_debug("\r\n");
+#endif
+ print_debug("Jumping to LinuxBIOS.\r\n");
+
+ if(cpu_reset == 1 ) {
+ __asm__ volatile (
+ "movl $0xffffffff, %ebp\n\t"
+ );
+ }
+ else {
+ __asm__ volatile (
+ "xorl %ebp, %ebp\n\t"
+ );
+ }
+
+ __asm__ volatile (
+ "cli\n\t"
+ "leal _iseg, %edi\n\t"
+ "jmp %edi\n\t"
+ );
+
+}
diff --git a/src/cpu/x86/lapic/boot_cpu.c b/src/cpu/x86/lapic/boot_cpu.c
index d3a8f6e7a7..bca73e137e 100644
--- a/src/cpu/x86/lapic/boot_cpu.c
+++ b/src/cpu/x86/lapic/boot_cpu.c
@@ -1,6 +1,6 @@
#include <cpu/x86/msr.h>
-int boot_cpu(void)
+static int boot_cpu(void)
{
int bsp;
msr_t msr;