Diffstat (limited to 'src')
-rw-r--r--  src/arch/x86/c_start.S         | 13
-rw-r--r--  src/arch/x86/exit_car.S        | 19
-rw-r--r--  src/arch/x86/gdt_init.S        | 12
-rw-r--r--  src/cpu/x86/lapic/secondary.S  |  6
-rw-r--r--  src/cpu/x86/sipi_vector.S      |  2
-rw-r--r--  src/cpu/x86/smm/smm_stub.S     |  2
6 files changed, 31 insertions, 23 deletions
diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index f55ab6e06e..b7ffddc385 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -32,8 +32,11 @@ thread_stacks:
.globl _start
_start:
cli
+#ifdef __x86_64__
+ movabs $gdtaddr, %rax
+ lgdt (%rax)
+#else
lgdt %cs:gdtaddr
-#ifndef __x86_64__
ljmp $RAM_CODE_SEG, $1f
#endif
1: movl $RAM_DATA_SEG, %eax
@@ -52,11 +55,14 @@ _start:
cld
#ifdef __x86_64__
- mov %rdi, _cbmem_top_ptr
+ mov %rdi, %rax
+ movabs %rax, _cbmem_top_ptr
+ movabs $_stack, %rdi
#else
/* The return argument is at 0(%esp), the calling argument at 4(%esp) */
movl 4(%esp), %eax
movl %eax, _cbmem_top_ptr
+ leal _stack, %edi
#endif
/** poison the stack. Code should not count on the
@@ -64,7 +70,6 @@ _start:
* recently uncovered a bug in the broadcast SIPI
* code.
*/
- leal _stack, %edi
movl $_estack, %ecx
subl %edi, %ecx
shrl $2, %ecx /* it is 32 bit aligned, right? */
@@ -226,7 +231,7 @@ SetCodeSelector:
push %rsp
pushfq
push %rcx # cx is code segment selector from caller
- mov $setCodeSelectorLongJump, %rax
+ movabs $setCodeSelectorLongJump, %rax
push %rax
# the iret will continue at next instruction, with the new cs value
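For context, a minimal sketch of the stack-poison idiom the hunks above rearrange; _stack and _estack are the linker-provided stack bounds already used in this file, while the 0xDEADBEEF fill value is assumed here rather than taken from the hunk:

#ifdef __x86_64__
	movabs	$_stack, %rdi		/* 64-bit absolute base of the stack */
#else
	leal	_stack, %edi
#endif
	movl	$_estack, %ecx
	subl	%edi, %ecx		/* byte length of the stack region */
	shrl	$2, %ecx		/* ... converted to 32-bit words */
	movl	$0xDEADBEEF, %eax	/* assumed poison pattern */
	rep	stosl			/* fill the whole region with the pattern */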
diff --git a/src/arch/x86/exit_car.S b/src/arch/x86/exit_car.S
index fae7899e17..d1b1a537fe 100644
--- a/src/arch/x86/exit_car.S
+++ b/src/arch/x86/exit_car.S
@@ -49,7 +49,8 @@ _start:
#endif
#ifdef __x86_64__
- mov %rdi, _cbmem_top_ptr
+ mov %rdi, %rax
+ movabs %rax, _cbmem_top_ptr
#else
/* The return argument is at 0(%esp), the calling argument at 4(%esp) */
movl 4(%esp), %eax
@@ -60,7 +61,12 @@ _start:
cpuid
btl $CPUID_FEATURE_CLFLUSH_BIT, %edx
jnc skip_clflush
+#ifdef __x86_64__
+ movabs _cbmem_top_ptr, %rax
+ clflush (%rax)
+#else
clflush _cbmem_top_ptr
+#endif
skip_clflush:
/* chipset_teardown_car() is expected to disable cache-as-ram. */
@@ -71,17 +77,24 @@ skip_clflush:
mov %cr0, %rax
and $(~(CR0_CD | CR0_NW)), %eax
mov %rax, %cr0
+
+ /* Ensure cache is clean. */
+ invd
+
+ /* Set up new stack. */
+ movabs post_car_stack_top, %rax
+ mov %rax, %rsp
#else
mov %cr0, %eax
and $(~(CR0_CD | CR0_NW)), %eax
mov %eax, %cr0
-#endif
+
/* Ensure cache is clean. */
invd
/* Set up new stack. */
mov post_car_stack_top, %esp
-
+#endif
/*
* Honor variable MTRR information pushed on the stack with the
* following layout:
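The exit_car.S hunks rely on movabs in more than one way, so a small standalone sketch of the three forms involved may help; 'scratch' is a hypothetical variable standing in for _cbmem_top_ptr or post_car_stack_top:

	.data
scratch:
	.quad	0
	.text
	movabs	$scratch, %rax		/* immediate form: %rax = address of scratch */
	movabs	scratch, %rax		/* moffs64 load: %rax = value stored at scratch */
	movabs	%rax, scratch		/* moffs64 store: write %rax back to scratch */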
diff --git a/src/arch/x86/gdt_init.S b/src/arch/x86/gdt_init.S
index f33a1517d8..30b39653c9 100644
--- a/src/arch/x86/gdt_init.S
+++ b/src/arch/x86/gdt_init.S
@@ -23,18 +23,6 @@ gdtptr:
.section .init._gdt64_, "ax", @progbits
.globl gdt_init64
gdt_init64:
- /* Workaround a bug in the assembler.
- * The following code doesn't work:
- * lgdt gdtptr64
- *
- * The assembler tries to save memory by using 32bit displacement addressing mode.
- * Displacements are using signed integers.
- * This is fine in protected mode, as the negative address points to the correct
- * address > 2GiB, but in long mode this doesn't work at all.
- * Tests showed that QEMU can gracefully handle it, but real CPUs can't.
- *
- * Use the movabs pseudo instruction to force using a 64bit absolute address.
- */
movabs $gdtptr64, %rax
lgdt (%rax)
ret
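The comment removed above explained why the indirection is kept; a sketch of the contrast it described, using the gdtptr64 symbol from this file:

	/* Direct form: only a sign-extended 32-bit displacement can be
	 * encoded, so a gdtptr64 placed above 2 GiB resolves to the wrong
	 * address in long mode on real CPUs (QEMU happens to tolerate it).
	 *
	 *	lgdt	gdtptr64
	 *
	 * Register-indirect form: the full 64-bit absolute address is
	 * loaded first, so the symbol can live anywhere. */
	movabs	$gdtptr64, %rax
	lgdt	(%rax)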
diff --git a/src/cpu/x86/lapic/secondary.S b/src/cpu/x86/lapic/secondary.S
index 073e6b485b..d36bc9a645 100644
--- a/src/cpu/x86/lapic/secondary.S
+++ b/src/cpu/x86/lapic/secondary.S
@@ -61,9 +61,11 @@ __ap_protected_start:
#if ENV_X86_64
/* entry64.inc preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>
- mov secondary_stack, %rsp
+ movabs secondary_stack, %rax
+ mov %rax, %rsp
andl $0xfffffff0, %esp
- mov secondary_cpu_index, %rdi
+ movabs secondary_cpu_index, %rax
+ mov %rax, %rdi
#else
/* Set the stack pointer, and flag that we are done */
xorl %eax, %eax
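A sketch of the 64-bit hand-off pattern the hunk above converts, under the SysV AMD64 calling convention; the C entry point name secondary_cpu_init is assumed here, and the alignment is done on the full 64-bit stack pointer:

	movabs	secondary_stack, %rax	/* value stored in the variable, not its address */
	mov	%rax, %rsp		/* switch onto the per-AP stack */
	and	$-16, %rsp		/* 16-byte align without truncating to 32 bits */
	movabs	secondary_cpu_index, %rax
	mov	%rax, %rdi		/* first argument register in the SysV ABI */
	call	secondary_cpu_init	/* assumed C entry point for this AP */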
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index f9b29576bd..d8156b88a8 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -220,7 +220,7 @@ load_msr:
mov %rsi, %rdi /* cpu_num */
- movl c_handler, %eax
+ movabs c_handler, %eax
call *%rax
#else
/* c_handler(cpu_num), preserve proper stack alignment */
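Both this hunk and the smm_stub.S one below reach the C handler through a pointer stored in memory; a sketch of that pattern, assuming c_handler is a 32-bit field as the %eax destination suggests:

	mov	%rsi, %rdi		/* cpu_num becomes the first SysV argument */
	movabs	c_handler, %eax		/* moffs load of the stored 32-bit pointer;
					 * writing %eax zero-extends into %rax */
	call	*%rax			/* indirect call into the C handler */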
diff --git a/src/cpu/x86/smm/smm_stub.S b/src/cpu/x86/smm/smm_stub.S
index 0c690da986..07be047a36 100644
--- a/src/cpu/x86/smm/smm_stub.S
+++ b/src/cpu/x86/smm/smm_stub.S
@@ -210,7 +210,7 @@ apicid_end:
mov %rsp, %rdi /* *arg */
- movl c_handler, %eax
+ movabs c_handler, %eax
call *%rax
/*