author    | Ronald G. Minnich <rminnich@gmail.com>   | 2018-09-16 09:59:54 -0700
committer | Patrick Georgi <pgeorgi@google.com>      | 2018-10-11 17:42:41 +0000
commit    | 83bd46e5e5df0176af1208c7feec98d64273875b
tree      | a9b8905d9bb14b127a0b1d996ff5a712d721cab0 /src/arch
parent    | ce1064edd6827112ee86728ac15f67daab656f54
selfboot: remove bounce buffers
Bounce buffers were used in cases where the payload might overlap
coreboot. They are a problem for rampayloads because allocating them
requires malloc. They are also an artifact of our x86 past, from
before we had a relocatable ramstage; of the five architectures we
support, only x86 needs them, and currently they only seem to matter
on the following chipsets:
src/northbridge/amd/amdfam10/Kconfig
src/northbridge/amd/lx/Kconfig
src/northbridge/via/vx900/Kconfig
src/soc/intel/fsp_baytrail/Kconfig
src/soc/intel/fsp_broadwell_de/Kconfig
The first three are obsolete, or at least could be changed to avoid
the need for bounce buffers. The last two should be changed to no
longer need them. In any event, they can be fixed or pegged to a
release that supports them.
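For background, the overlap problem that bounce buffers solved is a
simple interval intersection. The sketch below is hypothetical: the
function and parameter names are invented for illustration and are not
coreboot API.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch: a payload segment [start, start + size) that
 * intersects the running ramstage [rs_start, rs_start + rs_size)
 * cannot be loaded in place. The bounce buffer copied coreboot out of
 * the way first and restored it if the payload returned. */
static bool payload_overlaps_ramstage(uintptr_t start, size_t size,
				      uintptr_t rs_start, size_t rs_size)
{
	return start < rs_start + rs_size && rs_start < start + size;
}
```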
For these five chipsets we change CONFIG_RAMBASE from 0x100000 (the
value needed in 1999 for the 32-bit Linux kernel, the original
ramstage) to 0xe00000 (14 MiB), which puts the non-relocatable x86
ramstage out of the way of any reasonable payload until we can get
rid of it for good.
14 MiB was chosen after some discussion, and it fits well (see the
sanity check after this list):
o It fits in the 16 MiB cacheable range coreboot sets up by default.
o Most small payloads are well under 14 MiB (even kernels!).
o Most large payloads get loaded at 16 MiB (especially kernels!).
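A quick sanity check of the constants above (an illustrative snippet,
not part of the patch; the MiB macro here is local to the example):

```c
#include <assert.h>

/* Illustrative only: decode the Kconfig values discussed above. */
#define MiB (1024UL * 1024UL)

static_assert(0xe00000 == 14 * MiB, "new RAMBASE is 14 MiB");
static_assert(0x1000000 == 16 * MiB, "new RAMTOP is 16 MiB");
/* The gap left for the non-relocatable ramstage is 2 MiB, comfortably
 * more than the 1 MiB that fam10 and broadwell_de require. */
static_assert(0x1000000 - 0xe00000 == 2 * MiB, "2 MiB for ramstage");
```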
With this change in place, coreboot still correctly loads a bzImage
payload. Werner reports that the 0xe00000 setting works on his
Broadwell systems.
Change-Id: I602feb32f35e8af1d0dc4ea9f25464872c9b824c
Signed-off-by: Ronald G. Minnich <rminnich@gmail.com>
Reviewed-on: https://review.coreboot.org/28647
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Diffstat (limited to 'src/arch')
-rw-r--r-- | src/arch/arm/boot.c    |   5 |
-rw-r--r-- | src/arch/arm64/boot.c  |   5 |
-rw-r--r-- | src/arch/mips/boot.c   |   5 |
-rw-r--r-- | src/arch/power8/boot.c |   5 |
-rw-r--r-- | src/arch/riscv/boot.c  |   5 |
-rw-r--r-- | src/arch/x86/Kconfig   |  11 |
-rw-r--r-- | src/arch/x86/boot.c    | 201 |
7 files changed, 9 insertions, 228 deletions
diff --git a/src/arch/arm/boot.c b/src/arch/arm/boot.c
index e208fc9051..1767fe04c2 100644
--- a/src/arch/arm/boot.c
+++ b/src/arch/arm/boot.c
@@ -25,8 +25,3 @@ void arch_prog_run(struct prog *prog)
 	doit = prog_entry(prog);
 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
diff --git a/src/arch/arm64/boot.c b/src/arch/arm64/boot.c
index 2357912eae..d8a4630750 100644
--- a/src/arch/arm64/boot.c
+++ b/src/arch/arm64/boot.c
@@ -55,11 +55,6 @@ void arch_prog_run(struct prog *prog)
 	doit(prog_entry_arg(prog));
 }

-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
-
 /* Generic stage entry point. Can be overridden by board/SoC if needed. */
 __weak void stage_entry(void)
 {
diff --git a/src/arch/mips/boot.c b/src/arch/mips/boot.c
index 608af7b588..5ab36ec390 100644
--- a/src/arch/mips/boot.c
+++ b/src/arch/mips/boot.c
@@ -23,8 +23,3 @@ void arch_prog_run(struct prog *prog)

 	doit(cb_tables);
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
diff --git a/src/arch/power8/boot.c b/src/arch/power8/boot.c
index fa1586f25b..4da60b4e6c 100644
--- a/src/arch/power8/boot.c
+++ b/src/arch/power8/boot.c
@@ -21,8 +21,3 @@ void arch_prog_run(struct prog *prog)

 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
diff --git a/src/arch/riscv/boot.c b/src/arch/riscv/boot.c
index d7233fe3da..e1dc61955b 100644
--- a/src/arch/riscv/boot.c
+++ b/src/arch/riscv/boot.c
@@ -48,8 +48,3 @@ void arch_prog_run(struct prog *prog)

 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
diff --git a/src/arch/x86/Kconfig b/src/arch/x86/Kconfig
index c85e36a8ea..46e0c2d368 100644
--- a/src/arch/x86/Kconfig
+++ b/src/arch/x86/Kconfig
@@ -80,13 +80,20 @@ config SIPI_VECTOR_IN_ROM
 	default n
 	depends on ARCH_X86

+# Set the rambase for systems that still need it, only 5 chipsets as of
+# Sep 2018. This value was 0x100000, chosen to match the entry point
+# of Linux 2.2 in 1999. The new value, 14 MiB, makes a lot more sense
+# for as long as we need it; with luck, that won't be much longer.
+# In the long term, both RAMBASE and RAMTOP should be removed.
+# This value leaves more than 1 MiB which is required for fam10
+# and broadwell_de.
 config RAMBASE
 	hex
-	default 0x100000
+	default 0xe00000

 config RAMTOP
 	hex
-	default 0x200000
+	default 0x1000000
 	depends on ARCH_X86

 # Traditionally BIOS region on SPI flash boot media was memory mapped right below
diff --git a/src/arch/x86/boot.c b/src/arch/x86/boot.c
index 8a6592f189..2967cf60a1 100644
--- a/src/arch/x86/boot.c
+++ b/src/arch/x86/boot.c
@@ -19,193 +19,6 @@
 #include <string.h>
 #include <symbols.h>

-/* When the ramstage is relocatable the elf loading ensures an elf image cannot
- * be loaded over the ramstage code. */
-static void jmp_payload_no_bounce_buffer(void *entry)
-{
-	/* Jump to kernel */
-	__asm__ __volatile__(
-		" cld\n\t"
-		/* Now jump to the loaded image */
-		" call *%0\n\t"
-
-		/* The loaded image returned? */
-		" cli\n\t"
-		" cld\n\t"
-
-		::
-		"r" (entry)
-		);
-}
-
-static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
-{
-	unsigned long lb_start, lb_size;
-
-	lb_start = (unsigned long)&_program;
-	lb_size = _program_size;
-
-	printk(BIOS_SPEW, "entry = 0x%08lx\n", (unsigned long)entry);
-	printk(BIOS_SPEW, "lb_start = 0x%08lx\n", lb_start);
-	printk(BIOS_SPEW, "lb_size = 0x%08lx\n", lb_size);
-	printk(BIOS_SPEW, "buffer = 0x%08lx\n", buffer);
-
-	/* Jump to kernel */
-	__asm__ __volatile__(
-		" cld\n\t"
-#ifdef __x86_64__
-		/* switch back to 32-bit mode */
-		" push %4\n\t"
-		" push %3\n\t"
-		" push %2\n\t"
-		" push %1\n\t"
-		" push %0\n\t"
-
-		/* use iret to switch to 32-bit code segment */
-		" xor %%rax,%%rax\n\t"
-		" mov %%ss, %%ax\n\t"
-		" push %%rax\n\t"
-		" mov %%rsp, %%rax\n\t"
-		" add $8, %%rax\n\t"
-		" push %%rax\n\t"
-		" pushfq\n\t"
-		" push $0x10\n\t"
-		" lea 3(%%rip), %%rax\n\t"
-		" push %%rax\n\t"
-		" iretq\n\t"
-		".code32\n\t"
-		/* disable paging */
-		" mov %%cr0, %%eax\n\t"
-		" btc $31, %%eax\n\t"
-		" mov %%eax, %%cr0\n\t"
-		/* disable long mode */
-		" mov $0xC0000080, %%ecx\n\t"
-		" rdmsr\n\t"
-		" btc $8, %%eax\n\t"
-		" wrmsr\n\t"
-
-		" pop %%eax\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%ebx\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%ecx\n\t"
-
-		" add $4, %%esp\n\t"
-		" pop %%edx\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%esi\n\t"
-		" add $4, %%esp\n\t"
-#endif
-
-		/* Save the callee save registers... */
-		" pushl %%esi\n\t"
-		" pushl %%edi\n\t"
-		" pushl %%ebx\n\t"
-		/* Save the parameters I was passed */
-#ifdef __x86_64__
-		" pushl $0\n\t"		/* 20 adjust */
-		" pushl %%eax\n\t"	/* 16 lb_start */
-		" pushl %%ebx\n\t"	/* 12 buffer */
-		" pushl %%ecx\n\t"	/* 8 lb_size */
-		" pushl %%edx\n\t"	/* 4 entry */
-		" pushl %%esi\n\t"	/* 0 elf_boot_notes */
-#else
-		" pushl $0\n\t"		/* 20 adjust */
-		" pushl %0\n\t"		/* 16 lb_start */
-		" pushl %1\n\t"		/* 12 buffer */
-		" pushl %2\n\t"		/* 8 lb_size */
-		" pushl %3\n\t"		/* 4 entry */
-		" pushl %4\n\t"		/* 0 elf_boot_notes */
-
-#endif
-		/* Compute the adjustment */
-		" xorl %%eax, %%eax\n\t"
-		" subl 16(%%esp), %%eax\n\t"
-		" addl 12(%%esp), %%eax\n\t"
-		" addl 8(%%esp), %%eax\n\t"
-		" movl %%eax, 20(%%esp)\n\t"
-		/* Place a copy of coreboot in its new location */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 12(%%esp), %%edi\n\t"
-		" addl 8(%%esp), %%edi\n\t"
-		" movl 16(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\n"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Adjust the stack pointer to point into the new coreboot
-		 * image
-		 */
-		" addl 20(%%esp), %%esp\n\t"
-		/* Adjust the instruction pointer to point into the new coreboot
-		 * image
-		 */
-		" movl $1f, %%eax\n\t"
-		" addl 20(%%esp), %%eax\n\t"
-		" jmp *%%eax\n\t"
-		"1:\n\t"
-
-		/* Copy the coreboot bounce buffer over coreboot */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 16(%%esp), %%edi\n\t"
-		" movl 12(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\t"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Now jump to the loaded image */
-		" movl %5, %%eax\n\t"
-		" movl 0(%%esp), %%ebx\n\t"
-		" call *4(%%esp)\n\t"
-
-		/* The loaded image returned? */
-		" cli\n\t"
-		" cld\n\t"
-
-		/* Copy the saved copy of coreboot where coreboot runs */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 16(%%esp), %%edi\n\t"
-		" movl 12(%%esp), %%esi\n\t"
-		" addl 8(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\t"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Adjust the stack pointer to point into the old coreboot
-		 * image
-		 */
-		" subl 20(%%esp), %%esp\n\t"
-
-		/* Adjust the instruction pointer to point into the old coreboot
-		 * image
-		 */
-		" movl $1f, %%eax\n\t"
-		" subl 20(%%esp), %%eax\n\t"
-		" jmp *%%eax\n\t"
-		"1:\n\t"
-
-		/* Drop the parameters I was passed */
-		" addl $24, %%esp\n\t"
-
-		/* Restore the callee save registers */
-		" popl %%ebx\n\t"
-		" popl %%edi\n\t"
-		" popl %%esi\n\t"
-#ifdef __x86_64__
-		".code64\n\t"
-#endif
-		::
-		"ri" (lb_start), "ri" (buffer), "ri" (lb_size),
-		"ri" (entry),
-		"ri"(0), "ri" (0)
-		);
-}
-
-int arch_supports_bounce_buffer(void)
-{
-	return !IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE);
-}
-
 int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 {
 	if (start < 1 * MiB && (start + size) <= 1 * MiB) {
@@ -217,22 +30,8 @@ int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 	return 0;
 }

-static void try_payload(struct prog *prog)
-{
-	if (prog_type(prog) == PROG_PAYLOAD) {
-		if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE))
-			jmp_payload_no_bounce_buffer(prog_entry(prog));
-		else
-			jmp_payload(prog_entry(prog),
-				(uintptr_t)prog_start(prog),
-				prog_size(prog));
-	}
-}
-
 void arch_prog_run(struct prog *prog)
 {
-	if (ENV_RAMSTAGE)
-		try_payload(prog);
-
 	__asm__ volatile (
 #ifdef __x86_64__
 		"jmp *%%rdi\n"
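For context on the surviving hook: payload_arch_usable_ram_quirk() is
kept by this patch, and the visible x86 implementation special-cases
the region below 1 MiB. The sketch below shows how a loader might
consult it; segment_is_loadable() is invented for illustration and is
not the actual selfboot code.

```c
#include <stdint.h>

/* Kept by this patch: returns nonzero when [start, start + size) is
 * usable even though it lies outside the normal payload range (on
 * x86, the region below 1 MiB). */
int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size);

/* Invented caller, illustration only: accept a segment if an arch
 * quirk explicitly allows it, otherwise fall back to the memory map. */
static int segment_is_loadable(uint64_t start, uint64_t size)
{
	if (payload_arch_usable_ram_quirk(start, size))
		return 1;
	/* ...otherwise consult the normal memory map (elided)... */
	return 0;
}
```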