 src/include/cbmem.h | 18 ++++++++++++++++++
 src/lib/imd_cbmem.c | 29 ++++++++++++++++++++---------
 2 files changed, 38 insertions(+), 9 deletions(-)
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 07e5645397..08c1d647bd 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -146,12 +146,30 @@ struct cbmem_entry;
  */
 #define DYN_CBMEM_ALIGN_SIZE (4096)
 
+#define CBMEM_ROOT_SIZE DYN_CBMEM_ALIGN_SIZE
+
+/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
+#define CBMEM_ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
+#define CBMEM_LG_ALIGN CBMEM_ROOT_MIN_SIZE
+
+/* Small allocation parameters. */
+#define CBMEM_SM_ROOT_SIZE 1024
+#define CBMEM_SM_ALIGN 32
+
+/* Determine the size for CBMEM root and the small allocations */
+static inline size_t cbmem_overhead_size(void)
+{
+	return 2 * CBMEM_ROOT_MIN_SIZE;
+}
 
 /* By default cbmem is attempted to be recovered. Returns 0 if cbmem was
  * recovered or 1 if cbmem had to be reinitialized. */
 int cbmem_initialize(void);
+int cbmem_initialize_id_size(u32 id, u64 size);
+
 /* Initialize cbmem to be empty. */
 void cbmem_initialize_empty(void);
+void cbmem_initialize_empty_id_size(u32 id, u64 size);
 
 /* Return the top address for dynamic cbmem. The address returned needs to
  * be consistent across romstage and ramstage, and it is required to be
diff --git a/src/lib/imd_cbmem.c b/src/lib/imd_cbmem.c
index 0649bf3b17..fc12c2536d 100644
--- a/src/lib/imd_cbmem.c
+++ b/src/lib/imd_cbmem.c
@@ -30,13 +30,6 @@
 #include <arch/acpi.h>
 #endif
 
-/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
-#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
-#define LG_ALIGN ROOT_MIN_SIZE
-/* Small allocation parameters. */
-#define SM_ROOT_SIZE 1024
-#define SM_ALIGN 32
-
 static inline struct imd *cbmem_get_imd(void)
 {
 	/* Only supply a backing store for imd in ramstage. */
@@ -116,6 +109,11 @@ static struct imd *imd_init_backing_with_recover(struct imd *backing)
 
 void cbmem_initialize_empty(void)
 {
+	cbmem_initialize_empty_id_size(0, 0);
+}
+
+void cbmem_initialize_empty_id_size(u32 id, u64 size)
+{
 	struct imd *imd;
 	struct imd imd_backing;
 
@@ -127,12 +125,16 @@ void cbmem_initialize_empty(void)
 
 	printk(BIOS_DEBUG, "CBMEM:\n");
 
-	if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN,
-					SM_ROOT_SIZE, SM_ALIGN)) {
+	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
+					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
 		printk(BIOS_DEBUG, "failed.\n");
 		return;
 	}
 
+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
 	/* Complete migration to CBMEM. */
 	cbmem_run_init_hooks();
 }
@@ -146,6 +148,11 @@ static inline int cbmem_fail_recovery(void)
 
 int cbmem_initialize(void)
 {
+	return cbmem_initialize_id_size(0, 0);
+}
+
+int cbmem_initialize_id_size(u32 id, u64 size)
+{
 	struct imd *imd;
 	struct imd imd_backing;
 
@@ -167,6 +174,10 @@ int cbmem_initialize(void)
 	imd_lockdown(imd);
 #endif
 
+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
 	/* Complete migration to CBMEM. */
 	cbmem_run_init_hooks();
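
Usage note (illustration only, not part of the change): with id = 0 and size = 0 the new *_id_size() functions behave exactly like the old entry points, which is how cbmem_initialize() and cbmem_initialize_empty() are now implemented. The sketch below shows how a caller might reserve a region at the moment CBMEM is set up; the helper name platform_cbmem_init(), the s3_resume flag, and the example id/size values are assumptions made up for this illustration.

#include <cbmem.h>
#include <console/console.h>

/* Hypothetical helper: bring up CBMEM and make sure one region,
 * identified by `id`, exists as soon as CBMEM is available. */
static void platform_cbmem_init(int s3_resume)
{
	const u32 id = 0x52455356;	/* example ID; use a real CBMEM_ID_* */
	const u64 size = 1024 * 1024;	/* example size: 1 MiB */

	if (s3_resume) {
		/* Recover the existing CBMEM area and add the region.
		 * Per the header comment, a non-zero return means CBMEM
		 * could not be recovered and had to be reinitialized. */
		if (cbmem_initialize_id_size(id, size))
			printk(BIOS_DEBUG, "CBMEM was reinitialized.\n");
	} else {
		/* Fresh boot: start with an empty CBMEM and carve out
		 * the region before the init hooks run. */
		cbmem_initialize_empty_id_size(id, size);
	}
}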