author     Stefan Reinauer <stepan@coresystems.de>   2010-04-27 06:56:47 +0000
committer  Stefan Reinauer <stepan@openbios.org>     2010-04-27 06:56:47 +0000
commit     14e22779625de673569c7b950ecc2753fb915b31 (patch)
tree       14a6ed759e116e9e6e9bbd7f499b74b96d6cc072 /src/cpu/x86/smm
parent     0e1e8065e303030c39c3f2c27e5d32ee58a16c66 (diff)
Since some people disapprove of white space cleanups mixed in regular commits
while others dislike them being extra commits, let's clean them up once and
for all for the existing code. If it's ugly, let it only be ugly once :-)

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5507 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/cpu/x86/smm')
-rw-r--r--  src/cpu/x86/smm/smiutil.c     |  4
-rw-r--r--  src/cpu/x86/smm/smm.ld        |  4
-rw-r--r--  src/cpu/x86/smm/smmhandler.S  | 30
-rw-r--r--  src/cpu/x86/smm/smmrelocate.S | 12
4 files changed, 25 insertions, 25 deletions
diff --git a/src/cpu/x86/smm/smiutil.c b/src/cpu/x86/smm/smiutil.c
index 9a2dfa599e..980ea69f51 100644
--- a/src/cpu/x86/smm/smiutil.c
+++ b/src/cpu/x86/smm/smiutil.c
@@ -72,14 +72,14 @@ static int uart_can_tx_byte(void)
static void uart_wait_to_tx_byte(void)
{
- while(!uart_can_tx_byte())
+ while(!uart_can_tx_byte())
;
}
static void uart_wait_until_sent(void)
{
while(!(inb(CONFIG_TTYS0_BASE + UART_LSR) & 0x40))
- ;
+ ;
}
static void uart_tx_byte(unsigned char data)
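Note: the two loops cleaned up above are the standard 16550 transmit handshake: uart_wait_to_tx_byte() spins until the UART can take another byte, and uart_wait_until_sent() spins on LSR bit 0x40 (transmitter empty) until the last byte has left the shift register. A minimal standalone sketch of the same pattern, assuming a 16550-compatible UART and ring-0 port I/O; the wrapper names here are illustrative, not coreboot's:

#include <stdint.h>

#define UART_LSR       0x05  /* line status register offset */
#define UART_LSR_THRE  0x20  /* bit 5: transmit holding register empty */
#define UART_LSR_TEMT  0x40  /* bit 6: transmitter completely empty */

/* Illustrative port I/O wrappers (require ring 0, e.g. firmware context). */
static inline uint8_t port_inb(uint16_t port)
{
	uint8_t v;
	__asm__ volatile ("inb %1, %0" : "=a"(v) : "Nd"(port));
	return v;
}

static inline void port_outb(uint8_t v, uint16_t port)
{
	__asm__ volatile ("outb %0, %1" : : "a"(v), "Nd"(port));
}

/* Wait until the UART accepts a byte, send it, then wait for full drain. */
void uart_putc(uint16_t base, uint8_t c)
{
	while (!(port_inb(base + UART_LSR) & UART_LSR_THRE))
		;
	port_outb(c, base);
	while (!(port_inb(base + UART_LSR) & UART_LSR_TEMT))
		;
}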
diff --git a/src/cpu/x86/smm/smm.ld b/src/cpu/x86/smm/smm.ld
index 1b25c2d2f8..d5c7127a15 100644
--- a/src/cpu/x86/smm/smm.ld
+++ b/src/cpu/x86/smm/smm.ld
@@ -4,7 +4,7 @@ CPUS = 4;
SECTIONS
{
- /* This is the actual SMM handler.
+ /* This is the actual SMM handler.
*
* We just put code, rodata, data and bss all in a row.
*/
@@ -43,7 +43,7 @@ SECTIONS
. = 0xa8000 - (( CPUS - 1) * 0x400);
.jumptable : {
*(.jumptable)
- }
+ }
/DISCARD/ : {
*(.comment)
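Note: just above this hunk the location counter is set to 0xa8000 - ((CPUS - 1) * 0x400), which places the .jumptable section so that the 1 KiB-spaced entry stubs line up with core 0's at exactly 0xa8000. A quick sketch checking that arithmetic for CPUS = 4:

#include <stdio.h>

#define CPUS 4

int main(void)
{
	/* Same placement rule as smm.ld: the table starts (CPUS - 1) slots
	 * of 0x400 bytes below 0xa8000; core 0's stub comes last. */
	unsigned long base = 0xa8000 - (CPUS - 1) * 0x400;
	for (int core = CPUS - 1; core >= 0; core--)
		printf("core %d entry at 0x%lx\n",
		       core, base + (CPUS - 1 - core) * 0x400UL);
	return 0;	/* prints 0xa7400, 0xa7800, 0xa7c00, 0xa8000 */
}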
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
index b443e5c1fe..3dd0b14c5a 100644
--- a/src/cpu/x86/smm/smmhandler.S
+++ b/src/cpu/x86/smm/smmhandler.S
@@ -38,11 +38,11 @@
* | |
* | |
* +--------------------------------+ 0xa8400
- * | SMM Entry Node 0 (+ stack) |
+ * | SMM Entry Node 0 (+ stack) |
* +--------------------------------+ 0xa8000
- * | SMM Entry Node 1 (+ stack) |
- * | SMM Entry Node 2 (+ stack) |
- * | SMM Entry Node 3 (+ stack) |
+ * | SMM Entry Node 1 (+ stack) |
+ * | SMM Entry Node 2 (+ stack) |
+ * | SMM Entry Node 3 (+ stack) |
* | ... |
* +--------------------------------+ 0xa7400
* | |
@@ -56,7 +56,7 @@
/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
* at which smm_handler_start lives. At the moment the handler
- * lives right at 0xa0000, so the offset is 0.
+ * lives right at 0xa0000, so the offset is 0.
*/
#define SMM_HANDLER_OFFSET 0x0000
@@ -101,15 +101,15 @@ smm_handler_start:
movl $LAPIC_ID, %esi
movl (%esi), %ecx
shr $24, %ecx
-
+
/* calculate stack offset by multiplying the APIC ID
* by 1024 (0x400), and save that offset in ebp.
*/
shl $10, %ecx
movl %ecx, %ebp
- /* We put the stack for each core right above
- * its SMM entry point. Core 0 starts at 0xa8000,
+ /* We put the stack for each core right above
+ * its SMM entry point. Core 0 starts at 0xa8000,
* we spare 0x10 bytes for the jump to be sure.
*/
movl $0xa8010, %eax
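Note: the hunk above derives each core's private stack from its APIC ID alone: shl $10 multiplies the ID by 0x400, and that offset is subtracted from core 0's stack bottom at 0xa8010 (0x10 bytes are reserved at 0xa8000 for the entry jump stub). The stack size used below is an assumption based on the 0x400-per-core spacing, since the instructions that finally load %esp fall outside this diff:

#include <stdio.h>

/* Assumed: each core owns one 0x400 slot minus the 0x10-byte jump stub. */
#define SMM_STACK_SIZE (0x400 - 0x10)

int main(void)
{
	for (unsigned apic_id = 0; apic_id < 4; apic_id++) {
		unsigned offset = apic_id << 10;     /* shl $10, %ecx */
		unsigned bottom = 0xa8010 - offset;  /* just above the stub */
		unsigned top    = bottom + SMM_STACK_SIZE;
		printf("core %u: stack 0x%x..0x%x\n", apic_id, bottom, top);
	}
	return 0;	/* core 0: 0xa8010..0xa8400, core 1: 0xa7c10..0xa8000, ... */
}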
@@ -155,11 +155,11 @@ smm_gdt:
.long 0x00000000, 0x00000000
/* gdt selector 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
+ .word 0xffff, 0x0000
+ .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
/* gdt selector 0x10, flat data segment */
- .word 0xffff, 0x0000
+ .word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
smm_gdt_end:
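Note: the descriptor bytes realigned here encode flat 4 GiB segments: limit 0xfffff with G=1 (4 KiB granularity), base 0, access byte 0x9b for the code segment (present, ring 0, execute/read) and 0x93 for the data segment. A small sketch decoding the packed 64-bit form of the two entries:

#include <stdio.h>
#include <stdint.h>

/* Unpack an 8-byte GDT descriptor as laid out in smm_gdt. */
static void decode_gdt(uint64_t d)
{
	uint32_t limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);
	uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
	uint8_t  access = (d >> 40) & 0xff;
	int      g      = (d >> 55) & 1;  /* granularity: 1 = 4 KiB units */
	printf("base=0x%08x limit=0x%05x%s access=0x%02x\n",
	       base, limit, g ? " (4 KiB units, so 4 GiB)" : "", access);
}

int main(void)
{
	decode_gdt(0x00cf9b000000ffffULL);  /* selector 0x08: flat code */
	decode_gdt(0x00cf93000000ffffULL);  /* selector 0x10: flat data */
	return 0;
}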
@@ -168,7 +168,7 @@ smm_gdt_end:
.section ".jumptable", "a", @progbits
/* This is the SMM jump table. All cores use the same SMM handler
- * for simplicity. But SMM Entry needs to be different due to the
+ * for simplicity. But SMM Entry needs to be different due to the
* save state area. The jump table makes sure all CPUs jump into the
* real handler on SMM entry.
*/
@@ -185,13 +185,13 @@ smm_gdt_end:
.code16
jumptable:
/* core 3 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 2 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 1 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
+ ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
/* core 0 */
ljmp $0xa000, $SMM_HANDLER_OFFSET
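Note: every stub in this table is the same 16-bit far jump. In real mode the physical target is segment * 16 + offset, so $0xa000:$SMM_HANDLER_OFFSET resolves to 0xa0000 for every core; the per-core difference is only the save state area implied by each core's SMBASE. A one-line check with SMM_HANDLER_OFFSET = 0 as defined earlier in this file:

#include <stdio.h>

#define SMM_HANDLER_OFFSET 0x0000

int main(void)
{
	/* Real-mode address translation: physical = segment * 16 + offset. */
	unsigned phys = (0xa000 << 4) + SMM_HANDLER_OFFSET;
	printf("all cores jump to 0x%x\n", phys);  /* 0xa0000 */
	return 0;
}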
diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
index 14fdc639bc..50a8f28c3f 100644
--- a/src/cpu/x86/smm/smmrelocate.S
+++ b/src/cpu/x86/smm/smmrelocate.S
@@ -22,7 +22,7 @@
// Make sure no stage 2 code is included:
#define __PRE_RAM__
-// FIXME: Is this piece of code southbridge specific, or
+// FIXME: Is this piece of code southbridge specific, or
// can it be cleaned up so this include is not required?
// It's needed right now because we get our PM_BASE from
// here.
@@ -73,7 +73,7 @@
* 0xa0000-0xa0400 and the stub plus stack would need to go
* at 0xa8000-0xa8100 (example for core 0). That is not enough.
*
- * This means we're basically limited to 16 cpu cores before
+ * This means we're basically limited to 16 cpu cores before
* we need to use the TSEG/HSEG for the actual SMM handler plus stack.
* When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
*
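Note: the limits discussed in this comment follow from the 0x400-per-core spacing. The CPU fetches its first SMI instruction at SMBASE + 0x8000, so relocating SMBASE downward in 0x400 steps from core 0's 0xa0000 walks the entry points down from 0xa8000 toward the handler at 0xa0000-0xa0400. A sketch of that layout under those assumptions:

#include <stdio.h>

int main(void)
{
	/* Assumed: SMBASE of core N is 0xa0000 - N * 0x400,
	 * and the SMI entry point is always SMBASE + 0x8000. */
	for (unsigned core = 0; core <= 32; core++) {
		unsigned smbase = 0xa0000 - core * 0x400;
		unsigned entry  = smbase + 0x8000;
		if (core < 4 || core >= 31)
			printf("core %2u: SMBASE=0x%05x entry=0x%05x%s\n",
			       core, smbase, entry,
			       entry < 0xa0400 ? "  <- inside the handler region" : "");
	}
	return 0;
}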
@@ -101,7 +101,7 @@ smm_relocation_start:
addr32 mov (%ebx), %al
cmp $0x64, %al
je 1f
-
+
mov $0x38000 + 0x7ef8, %ebx
jmp smm_relocate
1:
@@ -112,8 +112,8 @@ smm_relocate:
movl $LAPIC_ID, %esi
addr32 movl (%esi), %ecx
shr $24, %ecx
-
- /* calculate offset by multiplying the
+
+ /* calculate offset by multiplying the
* apic ID by 1024 (0x400)
*/
movl %ecx, %edx
@@ -158,7 +158,7 @@ smm_relocate:
outb %al, %dx
/* calculate ascii of cpu number. More than 9 cores? -> FIXME */
movb %cl, %al
- addb $'0', %al
+ addb $'0', %al
outb %al, %dx
mov $']', %al
outb %al, %dx
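Note: addb $'0', %al turns the core number into its ASCII digit, which is only correct for cores 0-9, hence the FIXME above. One possible fix direction (an assumption about intent, not coreboot's code) is to emit hexadecimal digits instead:

#include <stdio.h>

/* '0' + n is only a valid digit for n <= 9; hex covers 0-15 per character. */
static char cpu_digit(unsigned n)
{
	return n < 10 ? '0' + n : 'a' + (n - 10);
}

int main(void)
{
	for (unsigned n = 0; n < 16; n++)
		putchar(cpu_digit(n));
	putchar('\n');	/* prints 0123456789abcdef */
	return 0;
}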