author     Arthur Heymans <arthur@aheymans.xyz>        2019-11-25 12:20:01 +0100
committer  Lean Sheng Tan <sheng.tan@9elements.com>    2023-03-13 13:42:32 +0000
commit     3134a8152590f6d93232f6e56ab08fd87ebe1a0d (patch)
tree       8a7569aa1f9f204e36cc7571bd2ed71eca714b24 /src
parent     4bad919ce47fae7187dfc8ed0c0186a78fd10597 (diff)
cpu/x86/cache: CLFLUSH programs to memory before running
When cbmem is initialized in romstage and postcar is placed in the stage
cache and cbmem, from where it is run, the assumption is made that these
regions are all in UC memory, so that calling INVD in postcar is OK.

For performance reasons (e.g. faster postcar decompression) it is
desirable to cache cbmem and the stage cache during romstage.
Another reason is that AGESA sets up MTRRs during romstage to cache all
DRAM, which is currently worked around by using additional MTRRs to
mark those ranges UC.
TESTED on asus/p5ql-em and up/squared, on both the regular and the S3
resume bootpath. There are sometimes minimal performance improvements
(a few ms) when cbmem is cached.
Change-Id: I7ff2a57aee620908b71829457ea0f5a0c410ec5b
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37196
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Reviewed-by: Kapil Porwal <kapilporwal@google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
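
For context on the mechanism described above: INVD invalidates the caches without writing dirty lines back, so anything romstage wrote into cbmem while that region was cached would be lost once postcar issues INVD. The change below therefore pushes modified lines out to DRAM with CLFLUSH before postcar runs. The following is a minimal stand-alone sketch of that flush loop, not the coreboot code itself; the function names and the explicit fence are illustrative (coreboot's version is the new src/cpu/x86/cache/cache.c further down).

#include <stddef.h>
#include <stdint.h>

/* Issue CLFLUSH for a single cache line; same instruction the patch relies on. */
static inline void clflush_line(void *addr)
{
	__asm__ volatile ("clflush (%0)" :: "r" (addr) : "memory");
}

/*
 * Write every (possibly dirty) cache line of [start, start + size) back to
 * DRAM one line at a time, so that a later INVD cannot discard the data.
 * line_size is assumed to be a power of two; it would normally be read
 * from CPUID leaf 1 (EBX[15:8] * 8).
 */
void flush_region_to_dram(uintptr_t start, size_t size, size_t line_size)
{
	uintptr_t addr = start & ~(uintptr_t)(line_size - 1); /* align down to a line */

	for (; addr < start + size; addr += line_size)
		clflush_line((void *)addr);

	/* CLFLUSH is weakly ordered; this fence is a conservative addition,
	   not something the patch itself emits. */
	__asm__ volatile ("mfence" ::: "memory");
}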
Diffstat (limited to 'src')
-rw-r--r--  src/arch/x86/postcar_loader.c   |  22
-rw-r--r--  src/cpu/x86/Kconfig             |   5
-rw-r--r--  src/cpu/x86/Makefile.inc        |   1
-rw-r--r--  src/cpu/x86/cache/Makefile.inc  |   2
-rw-r--r--  src/cpu/x86/cache/cache.c       |  58
-rw-r--r--  src/include/cpu/x86/cache.h     |   4
6 files changed, 92 insertions(+), 0 deletions(-)
diff --git a/src/arch/x86/postcar_loader.c b/src/arch/x86/postcar_loader.c
index a8a77e0227..17286805ef 100644
--- a/src/arch/x86/postcar_loader.c
+++ b/src/arch/x86/postcar_loader.c
@@ -9,6 +9,7 @@
 #include <program_loading.h>
 #include <reset.h>
 #include <rmodule.h>
+#include <romstage_handoff.h>
 #include <security/vboot/vboot_common.h>
 #include <stage_cache.h>
 #include <timestamp.h>
@@ -137,6 +138,25 @@ static void postcar_cache_invalid(void)
 	board_reset();
 }
 
+/*
+ * POSTCAR will call invd so don't make assumptions on cbmem
+ * and external stage cache being UC.
+ */
+static void postcar_flush_cache(void)
+{
+	uintptr_t cbmem_base;
+	size_t cbmem_size;
+	uintptr_t stage_cache_base;
+	size_t stage_cache_size;
+
+	cbmem_get_region((void **)&cbmem_base, &cbmem_size);
+	prog_segment_loaded(cbmem_base, cbmem_size, SEG_FINAL);
+	if (CONFIG(TSEG_STAGE_CACHE) && !romstage_handoff_is_resume()) {
+		stage_cache_external_region((void **)&stage_cache_base, &stage_cache_size);
+		prog_segment_loaded(stage_cache_base, stage_cache_size, SEG_FINAL);
+	}
+}
+
 static void run_postcar_phase(struct postcar_frame *pcf)
 {
 	struct prog prog =
@@ -159,6 +179,8 @@ static void run_postcar_phase(struct postcar_frame *pcf)
 
 	console_time_report();
 
+	postcar_flush_cache();
+
 	prog_set_arg(&prog, cbmem_top());
 
 	prog_run(&prog);
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 50766cdac1..c85eace851 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -126,6 +126,11 @@ config SETUP_XIP_CACHE
 	  non-eviction mode and therefore need to be careful to avoid
 	  eviction.
 
+config X86_CLFLUSH_CAR
+	bool
+	help
+	  Select this on platforms that allow CLFLUSH while operating in CAR.
+
 config HAVE_SMI_HANDLER
 	bool
 	default n
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index dffc2b1cb9..3ef3a90313 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -5,6 +5,7 @@ subdirs-$(CONFIG_HAVE_SMI_HANDLER) += smm
 subdirs-$(CONFIG_UDELAY_TSC) += tsc
 # Use ARCH_BOOTBLOCK_X86_64 as a proxy for knowing if 64bit is going to be used
 subdirs-$(CONFIG_ARCH_BOOTBLOCK_X86_64) += 64bit
+subdirs-y += cache
 subdirs-$(CONFIG_PARALLEL_MP) += name
 
 ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
diff --git a/src/cpu/x86/cache/Makefile.inc b/src/cpu/x86/cache/Makefile.inc
new file mode 100644
index 0000000000..759488ec7e
--- /dev/null
+++ b/src/cpu/x86/cache/Makefile.inc
@@ -0,0 +1,2 @@
+romstage-y += cache.c
+ramstage-y += cache.c
diff --git a/src/cpu/x86/cache/cache.c b/src/cpu/x86/cache/cache.c
new file mode 100644
index 0000000000..d02d6d4427
--- /dev/null
+++ b/src/cpu/x86/cache/cache.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <arch/cpu.h>
+#include <cbmem.h>
+#include <console/console.h>
+#include <cpu/x86/cache.h>
+#include <program_loading.h>
+#include <types.h>
+
+bool clflush_supported(void)
+{
+	return (cpuid_edx(1) >> CPUID_FEATURE_CLFLUSH_BIT) & 1;
+}
+
+static void clflush_region(const uintptr_t start, const size_t size)
+{
+	uintptr_t addr;
+	const size_t cl_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
+
+	if (!clflush_supported()) {
+		printk(BIOS_DEBUG, "Not flushing cache to RAM, CLFLUSH not supported\n");
+		return;
+	}
+
+	printk(BIOS_SPEW, "CLFLUSH [0x%lx, 0x%lx]\n", start, start + size);
+
+	for (addr = ALIGN_DOWN(start, cl_size); addr < start + size; addr += cl_size)
+		clflush((void *)addr);
+}
+
+/*
+ * For each segment of a program loaded this function is called
+ * to invalidate caches for the addresses of the loaded segment
+ */
+void arch_segment_loaded(uintptr_t start, size_t size, int flags)
+{
+	/* INVD is only called in postcar stage so we only need
+	   to make sure that our code hits dram during romstage. */
+	if (!ENV_CACHE_AS_RAM)
+		return;
+	if (!ENV_ROMSTAGE)
+		return;
+	if (!CONFIG(POSTCAR_STAGE))
+		return;
+	if (!CONFIG(X86_CLFLUSH_CAR))
+		return;
+	if (flags != SEG_FINAL)
+		return;
+
+	/*
+	 * The assumption is made here that DRAM is only ready after cbmem
+	 * is initialized, to avoid flushing when loading earlier things (e.g. FSP, ...)
+	 */
+	if (!cbmem_online())
+		return;
+
+	clflush_region(start, size);
+}
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
index 27b727bcb9..63703a7871 100644
--- a/src/include/cpu/x86/cache.h
+++ b/src/include/cpu/x86/cache.h
@@ -12,6 +12,8 @@
 #if !defined(__ASSEMBLER__)
 
+#include <stdbool.h>
+
 static inline void wbinvd(void)
 {
 	asm volatile ("wbinvd" ::: "memory");
 }
@@ -27,6 +29,8 @@ static inline void clflush(void *addr)
 	asm volatile ("clflush (%0)"::"r" (addr));
 }
 
+bool clflush_supported(void);
+
 /* The following functions require the __always_inline due to AMD
  * function STOP_CAR_AND_CPU that disables cache as
  * RAM, the cache as RAM stack can no longer be used. Called
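
A note on the constants used in the new cache.c: CPUID leaf 1 reports CLFLUSH support in EDX bit 19 (which is what CPUID_FEATURE_CLFLUSH_BIT refers to), and EBX bits 15:8 give the CLFLUSH line size in 8-byte units, which is why the code multiplies by 8. Below is a small stand-alone illustration of that decoding; it uses the compiler's <cpuid.h> helper rather than coreboot's cpuid_ebx()/cpuid_edx() wrappers, so it is a hosted sketch, not firmware code.

#include <cpuid.h>	/* GCC/Clang CPUID helper */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* CPUID.01H: EDX bit 19 -> CLFLUSH available; EBX[15:8] * 8 -> line size in bytes. */
static bool cpu_has_clflush(size_t *line_size)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	*line_size = ((ebx >> 8) & 0xff) * 8;
	return (edx >> 19) & 1;
}

int main(void)
{
	size_t line_size;

	if (cpu_has_clflush(&line_size))
		printf("CLFLUSH supported, line size %zu bytes\n", line_size);
	else
		printf("CLFLUSH not supported\n");
	return 0;
}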