From 522149c310ad8885cd0b58a88cacac24852a541f Mon Sep 17 00:00:00 2001
From: Lee Leahy
Date: Fri, 8 May 2015 11:33:55 -0700
Subject: cbmem: Add initial allocation support

Add support to allocate a region just below CBMEM root. This region
is reserved for FSP 1.1 to use for its stack and variables.

BRANCH=none
BUG=None
TEST=Build and run on Braswell

Change-Id: I1d4b36ab366e6f8e036335c56c1756f2dfaab3f5
Signed-off-by: Lee Leahy
Reviewed-on: http://review.coreboot.org/10148
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin
---
 src/include/cbmem.h | 18 ++++++++++++++++++
 src/lib/imd_cbmem.c | 29 ++++++++++++++++++++---------
 2 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 07e5645397..08c1d647bd 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -146,12 +146,30 @@ struct cbmem_entry;
  */
 #define DYN_CBMEM_ALIGN_SIZE (4096)
+#define CBMEM_ROOT_SIZE DYN_CBMEM_ALIGN_SIZE
+
+/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
+#define CBMEM_ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
+#define CBMEM_LG_ALIGN CBMEM_ROOT_MIN_SIZE
+
+/* Small allocation parameters. */
+#define CBMEM_SM_ROOT_SIZE 1024
+#define CBMEM_SM_ALIGN 32
+
+/* Determine the size for CBMEM root and the small allocations */
+static inline size_t cbmem_overhead_size(void)
+{
+	return 2 * CBMEM_ROOT_MIN_SIZE;
+}
 
 /* By default cbmem is attempted to be recovered. Returns 0 if cbmem was
  * recovered or 1 if cbmem had to be reinitialized. */
 int cbmem_initialize(void);
+int cbmem_initialize_id_size(u32 id, u64 size);
+
 /* Initialize cbmem to be empty. */
 void cbmem_initialize_empty(void);
+void cbmem_initialize_empty_id_size(u32 id, u64 size);
 
 /* Return the top address for dynamic cbmem. The address returned needs to
  * be consistent across romstage and ramstage, and it is required to be
diff --git a/src/lib/imd_cbmem.c b/src/lib/imd_cbmem.c
index 0649bf3b17..fc12c2536d 100644
--- a/src/lib/imd_cbmem.c
+++ b/src/lib/imd_cbmem.c
@@ -30,13 +30,6 @@
 #include
 #endif
 
-/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
-#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
-#define LG_ALIGN ROOT_MIN_SIZE
-/* Small allocation parameters. */
-#define SM_ROOT_SIZE 1024
-#define SM_ALIGN 32
-
 static inline struct imd *cbmem_get_imd(void)
 {
 	/* Only supply a backing store for imd in ramstage. */
@@ -115,6 +108,11 @@ static struct imd *imd_init_backing_with_recover(struct imd *backing)
 }
 
 void cbmem_initialize_empty(void)
+{
+	cbmem_initialize_empty_id_size(0, 0);
+}
+
+void cbmem_initialize_empty_id_size(u32 id, u64 size)
 {
 	struct imd *imd;
 	struct imd imd_backing;
@@ -127,12 +125,16 @@ void cbmem_initialize_empty(void)
 
 	printk(BIOS_DEBUG, "CBMEM:\n");
 
-	if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN,
-					SM_ROOT_SIZE, SM_ALIGN)) {
+	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
+					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
 		printk(BIOS_DEBUG, "failed.\n");
 		return;
 	}
 
+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
 	/* Complete migration to CBMEM. */
 	cbmem_run_init_hooks();
 }
@@ -145,6 +147,11 @@ static inline int cbmem_fail_recovery(void)
 }
 
 int cbmem_initialize(void)
+{
+	return cbmem_initialize_id_size(0, 0);
+}
+
+int cbmem_initialize_id_size(u32 id, u64 size)
 {
 	struct imd *imd;
 	struct imd imd_backing;
@@ -167,6 +174,10 @@ int cbmem_initialize(void)
 	imd_lockdown(imd);
 #endif
 
+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
 	/* Complete migration to CBMEM. */
 	cbmem_run_init_hooks();
--
cgit v1.2.3
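
For context (not part of the patch): a minimal sketch of how a platform's romstage might use the new entry points to reserve the FSP region while bringing up CBMEM. The CBMEM_ID_FSP_RESERVED_MEMORY value and the fsp_reserved_bytes parameter are illustrative placeholders assumed for this example, not names introduced by this change.

/*
 * Illustrative caller sketch only; the id value and size below are
 * assumptions for this example, not defined by the patch.
 */
#include <cbmem.h>
#include <console/console.h>

#define CBMEM_ID_FSP_RESERVED_MEMORY	0x46535052	/* assumed id ('FSPR') */

static void platform_cbmem_init(int s3_resume, u64 fsp_reserved_bytes)
{
	if (s3_resume) {
		/* Try to recover CBMEM from the previous boot; per the header
		 * comment, a non-zero return means it had to be reinitialized. */
		if (cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
						fsp_reserved_bytes))
			printk(BIOS_ERR, "CBMEM recovery failed\n");
	} else {
		/* Fresh boot: create empty CBMEM and reserve the FSP region
		 * as the first allocation below the CBMEM root. */
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
						fsp_reserved_bytes);
	}
}

On the fresh-boot path this mirrors what the patch does internally: imd_create_tiered_empty() builds the root, then cbmem_add(id, size) places the requested region before the init hooks run.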