From 1c4c7ad1e5af3a9d9bdd061f5bb4780a4292021f Mon Sep 17 00:00:00 2001
From: Patrick Rudolph
Date: Fri, 3 Dec 2021 17:32:07 +0100
Subject: arch/x86/c_start.S: Add proper x86_64 code

Don't truncate upper bits in assembly code and thus allow loading of
ramstage above 4GiB.

Tested on qemu with cbmem_top set to TOUUD.

Change-Id: Ifc9b45f69d0b7534b2faacaad0d099cef2667478
Signed-off-by: Patrick Rudolph
Co-authored-by: Benjamin Doron
Reviewed-on: https://review.coreboot.org/c/coreboot/+/59874
Tested-by: build bot (Jenkins)
---
 src/arch/x86/c_start.S | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

(limited to 'src/arch')

diff --git a/src/arch/x86/c_start.S b/src/arch/x86/c_start.S
index 02ddf75d12..6bea8db10d 100644
--- a/src/arch/x86/c_start.S
+++ b/src/arch/x86/c_start.S
@@ -59,6 +59,24 @@ _start:
 	leal	_stack, %edi
 #endif
 
+#if ENV_X86_64
+	/** poison the stack. Code should not count on the
+	 * stack being full of zeros. This stack poisoning
+	 * recently uncovered a bug in the broadcast SIPI
+	 * code.
+	 */
+	movabs	$_estack, %rcx
+	sub	%rdi, %rcx
+	shr	$3, %rcx	/* it is 64 bit aligned, right? */
+	movq	$0xDEADBEEFDEADBEEF, %rax
+	rep
+	stosq
+
+	/* Set new stack with enforced alignment. */
+	movabs	$_estack, %rsp
+	movq	$(0xfffffffffffffff0), %rax
+	and	%rax, %rsp
+#else
 	/** poison the stack. Code should not count on the
 	 * stack being full of zeros. This stack poisoning
 	 * recently uncovered a bug in the broadcast SIPI
@@ -74,6 +92,7 @@ _start:
 	/* Set new stack with enforced alignment. */
 	movl	$_estack, %esp
 	andl	$(0xfffffff0), %esp
+#endif
 
 	/*
 	 * Now we are finished. Memory is up, data is copied and
@@ -82,7 +101,12 @@ _start:
 	 */
 	post_code(POSTCODE_PRE_HARDWAREMAIN)	/* post 6e */
 
+#if ENV_X86_64
+	movq	$0xFFFFFFFFFFFFFFF0, %rax
+	and	%rax, %rsp
+#else
 	andl	$0xFFFFFFF0, %esp
+#endif
 
 #if CONFIG(ASAN_IN_RAMSTAGE)
 	call asan_init
--
cgit v1.2.3