author     Stefan Reinauer <stepan@coresystems.de>   2010-04-21 20:06:10 +0000
committer  Stefan Reinauer <stepan@openbios.org>     2010-04-21 20:06:10 +0000
commit     9839cbd53fdcfcee52c406d9f52af924192e618d (patch)
tree       a38daaa0b545aaf36a7ad5f5df9dfe73e08d97da /src/devices
parent     cf036d1266d7ec307aac437105b094acbc9681ec (diff)
* clean up all but two warnings on artecgroup dbe61
* integrate vsm init into normal x86.c code (so it can run above 1M)
* call void main(unsigned long bist) instead of void cache_as_ram_main(void) on Geode LX (as we do on almost all other platforms now)
* Unify Geode LX MSR setup (will bring most non-working LX targets back to life)

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Patrick Georgi <patrick.georgi@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5471 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
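For context on the third item: the commit message refers to the romstage entry convention used on most platforms, where the cache-as-RAM assembly stub hands the BIST result to a C entry point. The sketch below is illustrative only; the body is a placeholder and not code from this change, with just the main(unsigned long bist) signature taken from the message above.

/* Illustrative sketch, not part of this commit: the unified romstage
 * entry point that Geode LX now uses as well. The BIST (built-in self
 * test) value from the reset vector is passed through the CAR stub. */
void main(unsigned long bist)
{
	/* A real romstage would report a non-zero BIST result here and
	 * then continue with early MSR setup, memory init, and so on. */
	(void)bist;
}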
Diffstat (limited to 'src/devices')
-rw-r--r--  src/devices/oprom/x86.c      |  87
-rw-r--r--  src/devices/oprom/x86_asm.S  |  99
2 files changed, 185 insertions(+), 1 deletion(-)
diff --git a/src/devices/oprom/x86.c b/src/devices/oprom/x86.c
index 2ce5b4534d..436f57b8bc 100644
--- a/src/devices/oprom/x86.c
+++ b/src/devices/oprom/x86.c
@@ -1,7 +1,8 @@
/*
* This file is part of the coreboot project.
*
- * Copyright (C) 2009 coresystems GmbH
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009-2010 coresystems GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,9 +37,11 @@ void x86_exception(struct eregs *info);
extern unsigned char __idt_handler, __idt_handler_size;
extern unsigned char __realmode_code, __realmode_code_size;
extern unsigned char __run_optionrom, __run_interrupt;
+extern unsigned char __run_vsa;
void (*run_optionrom)(u32 devfn) __attribute__((regparm(0))) = (void *)&__run_optionrom;
void (*vga_enable_console)(void) __attribute__((regparm(0))) = (void *)&__run_interrupt;
+void (*run_vsa)(u32 smm, u32 sysmem) __attribute__((regparm(0))) = (void *)&__run_vsa;
int (*intXX_handler[256])(struct eregs *regs) = { NULL };
@@ -160,6 +163,88 @@ void run_bios(struct device *dev, unsigned long addr)
printk(BIOS_DEBUG, "... Option ROM returned.\n");
}
+#if defined(CONFIG_GEODE_VSA) && CONFIG_GEODE_VSA
+#include <cpu/amd/lxdef.h>
+#include <cpu/amd/vr.h>
+#include <cbfs.h>
+
+#define VSA2_BUFFER 0x60000
+#define VSA2_ENTRY_POINT 0x60020
+
+// TODO move to a header file.
+void do_vsmbios(void);
+
+/* VSA virtual register helper */
+static u32 VSA_vrRead(u16 classIndex)
+{
+ u32 eax, ebx, ecx, edx;
+ asm volatile (
+ "movw $0x0AC1C, %%dx\n"
+ "orl $0x0FC530000, %%eax\n"
+ "outl %%eax, %%dx\n"
+ "addb $2, %%dl\n"
+ "inw %%dx, %%ax\n"
+ : "=a" (eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
+ : "a"(classIndex)
+ );
+
+ return eax;
+}
+
+void do_vsmbios(void)
+{
+ printk(BIOS_DEBUG, "Preparing for VSA...\n");
+
+ /* clear bios data area */
+ memset((void *)0x400, 0, 0x200);
+
+ /* Set up C interrupt handlers */
+ setup_interrupt_handlers();
+
+ /* Setting up realmode IDT */
+ setup_realmode_idt();
+
+ memcpy(REALMODE_BASE, &__realmode_code, (size_t)&__realmode_code_size);
+ printk(BIOS_SPEW, "VSA: Real mode stub @%p: %d bytes\n", REALMODE_BASE,
+ (u32)&__realmode_code_size);
+
+ if ((unsigned int)cbfs_load_stage("vsa") != VSA2_ENTRY_POINT) {
+ printk(BIOS_ERR, "Failed to load VSA.\n");
+ return;
+ }
+
+ unsigned char *buf = (unsigned char *)VSA2_BUFFER;
+ printk(BIOS_DEBUG, "VSA: Buffer @%p *[0]=%02x\n", buf, buf[0]);
+ printk(BIOS_DEBUG, "VSA: Signature *[0x20-0x23] is %02x:%02x:%02x:%02x\n",
+ buf[0x20], buf[0x21], buf[0x22], buf[0x23]);
+
+ /* Check for code to emit POST code at start of VSA. */
+ if ((buf[0x20] != 0xb0) || (buf[0x21] != 0x10) ||
+ (buf[0x22] != 0xe6) || (buf[0x23] != 0x80)) {
+ printk(BIOS_WARNING, "VSA: Signature incorrect. Install failed.\n");
+ return;
+ }
+
+ printk(BIOS_DEBUG, "Calling VSA module...\n");
+ /* ECX gets SMM, EDX gets SYSMEM */
+ run_vsa(MSR_GLIU0_SMM, MSR_GLIU0_SYSMEM);
+ printk(BIOS_DEBUG, "... VSA module returned.\n");
+
+ /* Restart timer 1 */
+ outb(0x56, 0x43);
+ outb(0x12, 0x41);
+
+ /* Check that VSA is running OK */
+ if (VSA_vrRead(SIGNATURE) == VSA2_SIGNATURE)
+ printk(BIOS_DEBUG, "VSM: VSA2 VR signature verified.\n");
+ else
+ printk(BIOS_ERR, "VSM: VSA2 VR signature not valid. Install failed.\n");
+}
+#endif
+
+/* interrupt_handler() is called from assembler code only,
+ * so there is no use in putting the prototype into a header file.
+ */
int __attribute__((regparm(0))) interrupt_handler(u32 intnumber,
u32 gsfs, u32 dses,
u32 edi, u32 esi,
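A note on the VSA_vrRead() helper added in x86.c above: the Geode LX virtual-register protocol writes the 0xFC53 unlock magic in the upper 16 bits of EAX together with the class/index in the lower 16 bits to I/O port 0xAC1C, then reads the 16-bit result from port 0xAC1E (the addb $2, %dl step). As a rough equivalent, the same access could be written with port I/O helpers instead of inline assembly. This is a hedged sketch, not part of the patch; the macro names are invented for illustration, and only the ports, the magic value, and the outl()/inw() accesses mirror the assembly shown above.

/* Sketch only: VSA_vrRead() expressed with port I/O helpers. */
#include <arch/io.h>
#include <stdint.h>

#define VR_INDEX_PORT	0xAC1C		/* 32-bit write: unlock magic + class/index */
#define VR_DATA_PORT	0xAC1E		/* 16-bit read: virtual register contents */
#define VR_UNLOCK	0xFC530000	/* magic OR'ed into EAX by the asm version */

static uint16_t vsa_vr_read(uint16_t classIndex)
{
	outl(VR_UNLOCK | classIndex, VR_INDEX_PORT);
	return inw(VR_DATA_PORT);
}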
diff --git a/src/devices/oprom/x86_asm.S b/src/devices/oprom/x86_asm.S
index 616aa8675f..194b4cb64c 100644
--- a/src/devices/oprom/x86_asm.S
+++ b/src/devices/oprom/x86_asm.S
@@ -155,6 +155,105 @@ __run_optionrom = RELOCATED(.)
popal
ret
+#if defined(CONFIG_GEODE_VSA) && CONFIG_GEODE_VSA
+#define VSA2_ENTRY_POINT 0x60020
+
+ .globl __run_vsa
+__run_vsa = RELOCATED(.)
+ /* save all registers to the stack */
+ pushal
+
+ /* Move the protected mode stack to a safe place */
+ mov %esp, __stack
+
+ movl %esp, %ebp
+ /* This function is called with regparm=0 and we have
+ * to skip the 32 bytes from pushal:
+ */
+ movl 36(%ebp), %ecx
+ movl 40(%ebp), %edx
+
+ /* Activate the right segment descriptor real mode. */
+ ljmp $0x28, $RELOCATED(1f)
+1:
+.code16
+ /* 16 bit code from here on... */
+
+ /* Load the segment registers w/ properly configured
+ * segment descriptors. They will retain these
+ * configurations (limits, writability, etc.) once
+ * protected mode is turned off.
+ */
+ mov $0x30, %ax
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ mov %ax, %ss
+
+ /* Turn off protection */
+ movl %cr0, %eax
+ andl $~PE, %eax
+ movl %eax, %cr0
+
+ /* Now really going into real mode */
+ ljmp $0, $RELOCATED(1f)
+1:
+ /* Setup a stack: Put the stack at the end of page zero.
+ * That way we can easily share it between real and
+ * protected, since the 16-bit ESP at segment 0 will
+ * work for any case. */
+ mov $0x0, %ax
+ mov %ax, %ss
+ movl $0x1000, %eax
+ movl %eax, %esp
+
+ /* Load our 16 bit idt */
+ xor %ax, %ax
+ mov %ax, %ds
+ lidt __realmode_idt
+
+ /* Set all segments to 0x0000, ds to 0x0040 */
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ mov $0x40, %ax
+ mov %ax, %ds
+ mov %cx, %ax // restore ax
+
+ /* ************************************ */
+ lcall $((VSA2_ENTRY_POINT & 0xffff0000) >> 4), $(VSA2_ENTRY_POINT & 0xffff)
+ /* ************************************ */
+
+ /* If we got here, just about done.
+ * Need to get back to protected mode
+ */
+ movl %cr0, %eax
+ orl $PE, %eax
+ movl %eax, %cr0
+
+ /* Now that we are in protected mode
+ * jump to a 32 bit code segment.
+ */
+ data32 ljmp $0x10, $RELOCATED(1f)
+1:
+ .code32
+ movw $0x18, %ax
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %fs
+ mov %ax, %gs
+ mov %ax, %ss
+
+ /* restore proper idt */
+ lidt idtarg
+
+ /* and exit */
+ mov __stack, %esp
+ popal
+ ret
+#endif
+
.globl __run_interrupt
__run_interrupt = RELOCATED(.)
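One detail worth spelling out in the __run_vsa stub above is the far call target: VSA2_ENTRY_POINT (0x60020) is split into real-mode segment 0x6000 ((0x60020 & 0xffff0000) >> 4) and offset 0x0020 (0x60020 & 0xffff), which recombine to the same linear address because segment * 16 + offset = 0x60000 + 0x20. A standalone arithmetic check, independent of the patch itself:

/* Sanity check of the lcall segment:offset split used in __run_vsa. */
#include <assert.h>
#include <stdio.h>

#define VSA2_ENTRY_POINT 0x60020

int main(void)
{
	unsigned int seg = (VSA2_ENTRY_POINT & 0xffff0000) >> 4;	/* 0x6000 */
	unsigned int off = VSA2_ENTRY_POINT & 0xffff;			/* 0x0020 */

	/* real-mode linear address = segment * 16 + offset */
	assert(seg * 16 + off == VSA2_ENTRY_POINT);
	printf("lcall $0x%04x, $0x%04x -> linear 0x%05x\n", seg, off, seg * 16 + off);
	return 0;
}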