-rw-r--r--   src/include/cbmem.h     |  13
-rw-r--r--   src/lib/Makefile.inc    |   7
-rw-r--r--   src/lib/cbmem_common.c  |  46
-rw-r--r--   src/lib/dynamic_cbmem.c | 470
-rw-r--r--   src/lib/imd_cbmem.c     | 294
-rw-r--r--   util/cbmem/cbmem.c      |   4
6 files changed, 321 insertions(+), 513 deletions(-)
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 8c61cb2a18..936fdf6838 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -88,11 +88,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-struct cbmem_id_to_name {
-        u32 id;
-        const char *name;
-};
-
 #define CBMEM_ID_TO_NAME_TABLE \
         { CBMEM_ID_ACPI, "ACPI " }, \
         { CBMEM_ID_ACPI_GNVS, "ACPI GNVS " }, \
@@ -142,13 +137,6 @@ struct cbmem_entry;
  * dynamic cbmem infrastructure allocates new regions below the last allocated
  * region. Regions are defined by a cbmem_entry struct that is opaque. Regions
  * may be removed, but the last one added is the only that can be removed.
- *
- * Dynamic cbmem has two allocators within it. All allocators use a top down
- * allocation scheme. However, there are 2 modes for each allocation depending
- * on the requested size. There are large allocations and small allocations.
- * An allocation is considered to be small when it is less than or equal to
- * DYN_CBMEM_ALIGN_SIZE / 2. The smaller allocations are fit into a larger
- * allocation region.
  */
 
 #define DYN_CBMEM_ALIGN_SIZE (4096)
@@ -202,7 +190,6 @@ void cbmem_fail_resume(void);
 /* Add the cbmem memory used to the memory map at boot. */
 void cbmem_add_bootmem(void);
 void cbmem_list(void);
-void cbmem_print_entry(int n, u32 id, u64 start, u64 size);
 #endif /* __PRE_RAM__ */
 
 /* These are for compatibility with old boards only. Any new chipset and board
diff --git a/src/lib/Makefile.inc b/src/lib/Makefile.inc
index 043238a263..f0fb028741 100644
--- a/src/lib/Makefile.inc
+++ b/src/lib/Makefile.inc
@@ -99,8 +99,11 @@ ramstage-$(CONFIG_GENERIC_GPIO_LIB) += gpio.c
 ramstage-$(CONFIG_GENERIC_UDELAY) += timer.c
 ramstage-y += b64_decode.c
 
-romstage-y += cbmem_common.c dynamic_cbmem.c
-ramstage-y += cbmem_common.c dynamic_cbmem.c
+romstage-y += cbmem_common.c
+romstage-y += imd_cbmem.c
+
+ramstage-y += cbmem_common.c
+ramstage-y += imd_cbmem.c
 
 romstage-y += imd.c
 ramstage-y += imd.c
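The comment kept in cbmem.h above still states the contract of the opaque entry API: regions are carved out top-down below the last allocation, and only the most recently added entry may be removed. A minimal sketch of a caller honoring that contract, for illustration only; CBMEM_ID_SCRATCH is a hypothetical id, not one from the table above.

#include <cbmem.h>
#include <string.h>

#define CBMEM_ID_SCRATCH 0x53435243     /* hypothetical id for illustration */

static void scratch_region_example(void)
{
        const struct cbmem_entry *e;

        /* Carves a new region out just below the last allocated one. */
        e = cbmem_entry_add(CBMEM_ID_SCRATCH, 2048);
        if (e == NULL)
                return;

        memset(cbmem_entry_start(e), 0, cbmem_entry_size(e));

        /* Legal only while this is still the last entry added. */
        cbmem_entry_remove(e);
}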
diff --git a/src/lib/cbmem_common.c b/src/lib/cbmem_common.c
index c3e838388d..6d581c40b2 100644
--- a/src/lib/cbmem_common.c
+++ b/src/lib/cbmem_common.c
@@ -18,39 +18,16 @@
  */
 #include <console/console.h>
 #include <cbmem.h>
-#include <stdlib.h>
+#include <bootstate.h>
+#include <rules.h>
+#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
+#include <arch/acpi.h>
+#endif
 
 /* FIXME: Remove after CBMEM_INIT_HOOKS. */
 #include <console/cbmem_console.h>
 #include <timestamp.h>
 
-#ifndef __PRE_RAM__
-
-static const struct cbmem_id_to_name cbmem_ids[] = { CBMEM_ID_TO_NAME_TABLE };
-
-void cbmem_print_entry(int n, u32 id, u64 base, u64 size)
-{
-        int i;
-        const char *name;
-
-        name = NULL;
-        for (i = 0; i < ARRAY_SIZE(cbmem_ids); i++) {
-                if (cbmem_ids[i].id == id) {
-                        name = cbmem_ids[i].name;
-                        break;
-                }
-        }
-
-        if (name == NULL)
-                printk(BIOS_DEBUG, "%08x ", id);
-        else
-                printk(BIOS_DEBUG, "%s", name);
-        printk(BIOS_DEBUG, "%2d. ", n);
-        printk(BIOS_DEBUG, "%08llx ", base);
-        printk(BIOS_DEBUG, "%08llx\n", size);
-}
-
-#endif /* !__PRE_RAM__ */
 
 /* FIXME: Replace with CBMEM_INIT_HOOKS API. */
 #if !IS_ENABLED(CONFIG_ARCH_X86)
@@ -67,3 +44,16 @@ void __attribute__((weak)) cbmem_fail_resume(void)
 {
 }
 #endif
+
+#if ENV_RAMSTAGE && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
+static void init_cbmem_post_device(void *unused)
+{
+        if (acpi_is_wakeup())
+                cbmem_initialize();
+        else
+                cbmem_initialize_empty();
+}
+
+BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY,
+                      init_cbmem_post_device, NULL);
+#endif
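The hunk above moves late cbmem setup onto the boot state machine instead of an explicit call site. For readers unfamiliar with that API, a minimal example of the same scheduling pattern; the callback name and body are placeholders.

#include <bootstate.h>
#include <console/console.h>

static void report_post_device(void *unused)
{
        /* Runs once, when ramstage transitions into BS_POST_DEVICE. */
        printk(BIOS_DEBUG, "devices initialized\n");
}

BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY, report_post_device, NULL);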
diff --git a/src/lib/dynamic_cbmem.c b/src/lib/dynamic_cbmem.c
deleted file mode 100644
index c63cb46eb2..0000000000
--- a/src/lib/dynamic_cbmem.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <bootstate.h>
-#include <bootmem.h>
-#include <console/console.h>
-#include <cbmem.h>
-#include <string.h>
-#include <stdlib.h>
-#include <arch/early_variables.h>
-#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
-#include <arch/acpi.h>
-#endif
-#ifndef UINT_MAX
-#define UINT_MAX 4294967295U
-#endif
-
-/*
- * The dynamic cbmem code uses a root region. The root region boundary
- * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
- * the address returned by cbmem_top() is a pointer that points to the
- * root data structure. The root data structure provides the book keeping
- * for each large entry.
- */
-
-/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
-#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
-#define CBMEM_POINTER_MAGIC 0xc0389479
-#define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)
-
-/* The cbmem_root_pointer structure lives just below address returned
- * from cbmem_top(). It points to the root data structure that
- * maintains the entries. */
-struct cbmem_root_pointer {
-        u32 magic;
-        u32 root;
-} __attribute__((packed));
-
-struct cbmem_entry {
-        u32 magic;
-        u32 start;
-        u32 size;
-        u32 id;
-} __attribute__((packed));
-
-struct cbmem_root {
-        u32 max_entries;
-        u32 num_entries;
-        u32 locked;
-        u32 size;
-        struct cbmem_entry entries[0];
-} __attribute__((packed));
-
-
-#if !defined(__PRE_RAM__)
-static void *cached_cbmem_top;
-
-void cbmem_set_top(void * ramtop)
-{
-        cached_cbmem_top = ramtop;
-}
-#endif
-
-static inline void *cbmem_top_cached(void)
-{
-#if !defined(__PRE_RAM__)
-        if (cached_cbmem_top == NULL)
-                cached_cbmem_top = cbmem_top();
-
-        return cached_cbmem_top;
-#else
-        return cbmem_top();
-#endif
-}
-
-static inline uintptr_t get_top_aligned(void)
-{
-        uintptr_t top;
-
-        /* Align down what is returned from cbmem_top(). */
-        top = (uintptr_t)cbmem_top_cached();
-        top &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
-
-        return top;
-}
-
-static inline void *get_root(void)
-{
-        uintptr_t pointer_addr;
-        struct cbmem_root_pointer *pointer;
-
-        pointer_addr = get_top_aligned();
-        if (pointer_addr == 0)
-                return NULL;
-
-        pointer_addr -= sizeof(struct cbmem_root_pointer);
-
-        pointer = (void *)pointer_addr;
-        if (pointer->magic != CBMEM_POINTER_MAGIC)
-                return NULL;
-
-        pointer_addr = pointer->root;
-        return (void *)pointer_addr;
-}
-
-static inline void cbmem_entry_assign(struct cbmem_entry *entry,
-                                      u32 id, u32 start, u32 size)
-{
-        entry->magic = CBMEM_ENTRY_MAGIC;
-        entry->start = start;
-        entry->size = size;
-        entry->id = id;
-}
-
-static inline const struct cbmem_entry *
-cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size)
-{
-        struct cbmem_entry *cbmem_entry;
-
-        cbmem_entry = &root->entries[root->num_entries];
-        root->num_entries++;
-
-        cbmem_entry_assign(cbmem_entry, id, start, size);
-
-        return cbmem_entry;
-}
-
-void cbmem_initialize_empty(void)
-{
-        uintptr_t pointer_addr;
-        uintptr_t root_addr;
-        unsigned long max_entries;
-        struct cbmem_root *root;
-        struct cbmem_root_pointer *pointer;
-
-        /* Place the root pointer and the root. The number of entries is
-         * dictated by difference between the root address and the pointer
-         * where the root address is aligned down to
-         * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
-         * address returned by get_top_aligned(). */
-        pointer_addr = get_top_aligned();
-        if (pointer_addr == 0)
-                return;
-
-        root_addr = pointer_addr - ROOT_MIN_SIZE;
-        root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
-        pointer_addr -= sizeof(struct cbmem_root_pointer);
-
-        max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
-                sizeof(struct cbmem_entry);
-
-        pointer = (void *)pointer_addr;
-        pointer->magic = CBMEM_POINTER_MAGIC;
-        pointer->root = root_addr;
-
-        root = (void *)root_addr;
-        root->max_entries = max_entries;
-        root->num_entries = 0;
-        root->locked = 0;
-        root->size = pointer_addr - root_addr +
-                sizeof(struct cbmem_root_pointer);
-
-        /* Add an entry covering the root region. */
-        cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);
-
-        printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
-               root, root->max_entries);
-
-        /* Complete migration to CBMEM. */
-        cbmem_run_init_hooks();
-}
-
-static inline int cbmem_fail_recovery(void)
-{
-        cbmem_initialize_empty();
-        cbmem_fail_resume();
-        return 1;
-}
-
-static int validate_entries(struct cbmem_root *root)
-{
-        unsigned int i;
-        uintptr_t current_end;
-
-        current_end = get_top_aligned();
-
-        printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n",
-               root->num_entries, root->max_entries, root);
-
-        /* Check that all regions are properly aligned and are just below
-         * the previous entry */
-        for (i = 0; i < root->num_entries; i++) {
-                struct cbmem_entry *entry = &root->entries[i];
-
-                if (entry->magic != CBMEM_ENTRY_MAGIC)
-                        return -1;
-
-                if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1))
-                        return -1;
-
-                if (entry->start + entry->size != current_end)
-                        return -1;
-
-                current_end = entry->start;
-        }
-
-        return 0;
-}
-
-int cbmem_initialize(void)
-{
-        struct cbmem_root *root;
-        uintptr_t top_according_to_root;
-
-        root = get_root();
-
-        /* No recovery possible since root couldn't be recovered. */
-        if (root == NULL)
-                return cbmem_fail_recovery();
-
-        /* Sanity check the root. */
-        top_according_to_root = (root->size + (uintptr_t)root);
-        if (get_top_aligned() != top_according_to_root)
-                return cbmem_fail_recovery();
-
-        if (root->num_entries > root->max_entries)
-                return cbmem_fail_recovery();
-
-        if ((root->max_entries * sizeof(struct cbmem_entry)) >
-            (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
-                return cbmem_fail_recovery();
-
-        /* Validate current entries. */
-        if (validate_entries(root))
-                return cbmem_fail_recovery();
-
-#if defined(__PRE_RAM__)
-        /* Lock the root in the romstage on a recovery. The assumption is that
-         * recovery is called during romstage on the S3 resume path. */
-        root->locked = 1;
-#endif
-
-        /* Complete migration to CBMEM. */
-        cbmem_run_init_hooks();
-
-        /* Recovery successful. */
-        return 0;
-}
-int cbmem_recovery(int is_wakeup)
-{
-        int rv = 0;
-        if (!is_wakeup)
-                cbmem_initialize_empty();
-        else
-                rv = cbmem_initialize();
-        return rv;
-}
-
-static uintptr_t cbmem_base(void)
-{
-        struct cbmem_root *root;
-        uintptr_t low_addr;
-
-        root = get_root();
-
-        if (root == NULL)
-                return 0;
-
-        low_addr = (uintptr_t)root;
-        /* a low address is low. */
-        low_addr &= 0xffffffff;
-
-        /* Assume the lowest address is the last one added. */
-        if (root->num_entries > 0) {
-                low_addr = root->entries[root->num_entries - 1].start;
-        }
-
-        return low_addr;
-}
-
-
-const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
-{
-        struct cbmem_root *root;
-        const struct cbmem_entry *entry;
-        uintptr_t base;
-        u32 size;
-        u32 aligned_size;
-
-        entry = cbmem_entry_find(id);
-
-        if (entry != NULL)
-                return entry;
-
-        /* Only handle sizes <= UINT_MAX internally. */
-        if (size64 > (u64)UINT_MAX)
-                return NULL;
-
-        size = size64;
-
-        root = get_root();
-
-        if (root == NULL)
-                return NULL;
-
-        /* Nothing can be added once it is locked down. */
-        if (root->locked)
-                return NULL;
-
-        if (root->max_entries == root->num_entries)
-                return NULL;
-
-        aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE);
-        base = cbmem_base();
-        base -= aligned_size;
-
-        return cbmem_entry_append(root, id, base, aligned_size);
-}
-
-void *cbmem_add(u32 id, u64 size)
-{
-        const struct cbmem_entry *entry;
-
-        entry = cbmem_entry_add(id, size);
-
-        if (entry == NULL)
-                return NULL;
-
-        return cbmem_entry_start(entry);
-}
-
-/* Retrieve a region provided a given id. */
-const struct cbmem_entry *cbmem_entry_find(u32 id)
-{
-        struct cbmem_root *root;
-        const struct cbmem_entry *entry;
-        unsigned int i;
-
-        root = get_root();
-
-        if (root == NULL)
-                return NULL;
-
-        entry = NULL;
-
-        for (i = 0; i < root->num_entries; i++) {
-                if (root->entries[i].id == id) {
-                        entry = &root->entries[i];
-                        break;
-                }
-        }
-
-        return entry;
-}
-
-void *cbmem_find(u32 id)
-{
-        const struct cbmem_entry *entry;
-
-        entry = cbmem_entry_find(id);
-
-        if (entry == NULL)
-                return NULL;
-
-        return cbmem_entry_start(entry);
-}
-
-/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
- * cannot be removed unless it was the last one added. */
-int cbmem_entry_remove(const struct cbmem_entry *entry)
-{
-        unsigned long entry_num;
-        struct cbmem_root *root;
-
-        root = get_root();
-
-        if (root == NULL)
-                return -1;
-
-        if (root->num_entries == 0)
-                return -1;
-
-        /* Nothing can be removed. */
-        if (root->locked)
-                return -1;
-
-        entry_num = entry - &root->entries[0];
-
-        /* If the entry is the last one in the root it can be removed. */
-        if (entry_num == (root->num_entries - 1)) {
-                root->num_entries--;
-                return 0;
-        }
-
-        return -1;
-}
-
-u64 cbmem_entry_size(const struct cbmem_entry *entry)
-{
-        return entry->size;
-}
-
-void *cbmem_entry_start(const struct cbmem_entry *entry)
-{
-        uintptr_t addr = entry->start;
-        return (void *)addr;
-}
-
-
-#if !defined(__PRE_RAM__)
-
-#if !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
-static void init_cbmem_post_device(void *unused)
-{
-        if (acpi_is_wakeup())
-                cbmem_initialize();
-        else
-                cbmem_initialize_empty();
-}
-
-BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY,
-                      init_cbmem_post_device, NULL);
-#endif
-
-void cbmem_add_bootmem(void)
-{
-        uintptr_t base;
-        uintptr_t top;
-
-        base = cbmem_base();
-        top = get_top_aligned();
-        bootmem_add_range(base, top - base, LB_MEM_TABLE);
-}
-
-void cbmem_list(void)
-{
-        unsigned int i;
-        struct cbmem_root *root;
-
-        root = get_root();
-
-        if (root == NULL)
-                return;
-
-        for (i = 0; i < root->num_entries; i++) {
-                struct cbmem_entry *entry;
-
-                entry = &root->entries[i];
-
-                cbmem_print_entry(i, entry->id, entry->start, entry->size);
-        }
-}
-#endif /* __PRE_RAM__ */
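For context on what is being deleted: the old root's bookkeeping capacity follows directly from the structure sizes declared above. A back-of-envelope check, assuming the packed 32-bit fields as written:

/*
 * sizeof(struct cbmem_root_pointer) = 8
 * sizeof(struct cbmem_root)         = 16   (header only)
 * sizeof(struct cbmem_entry)        = 16
 *
 * With ROOT_MIN_SIZE = 4096 and cbmem_top() already aligned:
 *
 *   max_entries = (pointer_addr - (root_addr + sizeof(*root)))
 *                 / sizeof(struct cbmem_entry)
 *               = (4096 - 8 - 16) / 16
 *               = 254 entries in the default root region
 */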
diff --git a/src/lib/imd_cbmem.c b/src/lib/imd_cbmem.c
new file mode 100644
index 0000000000..0649bf3b17
--- /dev/null
+++ b/src/lib/imd_cbmem.c
@@ -0,0 +1,294 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.
+ */
+
+#include <bootstate.h>
+#include <bootmem.h>
+#include <console/console.h>
+#include <cbmem.h>
+#include <imd.h>
+#include <rules.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arch/early_variables.h>
+#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
+#include <arch/acpi.h>
+#endif
+
+/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
+#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
+#define LG_ALIGN ROOT_MIN_SIZE
+/* Small allocation parameters. */
+#define SM_ROOT_SIZE 1024
+#define SM_ALIGN 32
+
+static inline struct imd *cbmem_get_imd(void)
+{
+        /* Only supply a backing store for imd in ramstage. */
+        if (ENV_RAMSTAGE) {
+                static struct imd imd_cbmem;
+                return &imd_cbmem;
+        }
+        return NULL;
+}
+
+/*
+ * x86 !CONFIG_EARLY_CBMEM_INIT platforms need to do the following in ramstage:
+ * 1. Call set_top_of_ram() which in turn calls cbmem_set_top().
+ * 2. Provide a get_top_of_ram() implementation.
+ *
+ * CONFIG_EARLY_CBMEM_INIT platforms just need to provide cbmem_top().
+ */
+void cbmem_set_top(void *ramtop)
+{
+        struct imd *imd = cbmem_get_imd();
+
+        imd_handle_init(imd, ramtop);
+}
+
+static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
+{
+        return (const struct cbmem_entry *)e;
+}
+
+static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
+{
+        return (const struct imd_entry *)e;
+}
+
+/* These are the different situations to handle:
+ * CONFIG_EARLY_CBMEM_INIT:
+ *      In ramstage cbmem_initialize() attempts a recovery of the
+ *      cbmem region set up by romstage. It uses cbmem_top() as the
+ *      starting point of recovery.
+ *
+ *      In romstage, similar to ramstage, cbmem_initialize() needs to
+ *      attempt recovery of the cbmem area using cbmem_top() as the limit.
+ *      cbmem_initialize_empty() initializes an empty cbmem area from
+ *      cbmem_top();
+ *
+ */
+static struct imd *imd_init_backing(struct imd *backing)
+{
+        struct imd *imd;
+
+        imd = cbmem_get_imd();
+
+        if (imd != NULL)
+                return imd;
+
+        imd = backing;
+
+        return imd;
+}
+
+static struct imd *imd_init_backing_with_recover(struct imd *backing)
+{
+        struct imd *imd;
+
+        imd = imd_init_backing(backing);
+        if (!ENV_RAMSTAGE) {
+                /* Early cbmem init platforms need to always use cbmem_top(). */
+                if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
+                        imd_handle_init(imd, cbmem_top());
+                /* Need to partially recover all the time outside of ramstage
+                 * because there's object storage outside of the stack. */
+                imd_handle_init_partial_recovery(imd);
+        }
+
+        return imd;
+}
+void cbmem_initialize_empty(void)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+
+        imd = imd_init_backing(&imd_backing);
+
+        /* Early cbmem init platforms need to always use cbmem_top(). */
+        if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
+                imd_handle_init(imd, cbmem_top());
+
+        printk(BIOS_DEBUG, "CBMEM:\n");
+
+        if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN,
+                                    SM_ROOT_SIZE, SM_ALIGN)) {
+                printk(BIOS_DEBUG, "failed.\n");
+                return;
+        }
+
+        /* Complete migration to CBMEM. */
+        cbmem_run_init_hooks();
+}
+
+static inline int cbmem_fail_recovery(void)
+{
+        cbmem_initialize_empty();
+        cbmem_fail_resume();
+        return 1;
+}
+
+int cbmem_initialize(void)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+
+        imd = imd_init_backing(&imd_backing);
+
+        /* Early cbmem init platforms need to always use cbmem_top(). */
+        if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
+                imd_handle_init(imd, cbmem_top());
+
+        if (imd_recover(imd))
+                return 1;
+
+#if defined(__PRE_RAM__)
+        /*
+         * Lock the imd in romstage on a recovery. The assumption is that
+         * if the imd area was recovered in romstage then S3 resume path
+         * is being taken.
+         */
+        imd_lockdown(imd);
+#endif
+
+        /* Complete migration to CBMEM. */
+        cbmem_run_init_hooks();
+
+        /* Recovery successful. */
+        return 0;
+}
+
+int cbmem_recovery(int is_wakeup)
+{
+        int rv = 0;
+        if (!is_wakeup)
+                cbmem_initialize_empty();
+        else
+                rv = cbmem_initialize();
+        return rv;
+}
+
+const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+        const struct imd_entry *e;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        e = imd_entry_find_or_add(imd, id, size64);
+
+        return imd_to_cbmem(e);
+}
+
+void *cbmem_add(u32 id, u64 size)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+        const struct imd_entry *e;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        e = imd_entry_find_or_add(imd, id, size);
+
+        if (e == NULL)
+                return NULL;
+
+        return imd_entry_at(imd, e);
+}
+
+/* Retrieve a region provided a given id. */
+const struct cbmem_entry *cbmem_entry_find(u32 id)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+        const struct imd_entry *e;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        e = imd_entry_find(imd, id);
+
+        return imd_to_cbmem(e);
+}
+
+void *cbmem_find(u32 id)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+        const struct imd_entry *e;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        e = imd_entry_find(imd, id);
+
+        if (e == NULL)
+                return NULL;
+
+        return imd_entry_at(imd, e);
+}
+
+/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
+ * cannot be removed unless it was the last one added. */
+int cbmem_entry_remove(const struct cbmem_entry *entry)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        return imd_entry_remove(imd, cbmem_to_imd(entry));
+}
+
+u64 cbmem_entry_size(const struct cbmem_entry *entry)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        return imd_entry_size(imd, cbmem_to_imd(entry));
+}
+
+void *cbmem_entry_start(const struct cbmem_entry *entry)
+{
+        struct imd *imd;
+        struct imd imd_backing;
+
+        imd = imd_init_backing_with_recover(&imd_backing);
+
+        return imd_entry_at(imd, cbmem_to_imd(entry));
+}
+
+#if ENV_RAMSTAGE
+void cbmem_add_bootmem(void)
+{
+        void *base = NULL;
+        size_t size = 0;
+
+        imd_region_used(cbmem_get_imd(), &base, &size);
+        bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
+}
+
+void cbmem_list(void)
+{
+        static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
+
+        imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
+}
+#endif /* __PRE_RAM__ */
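With the imd backend in place, every public entry point above funnels through imd_init_backing_with_recover(), so callers keep the same interface they had before. A sketch of typical ramstage usage of the functions added above; the payload struct is hypothetical, while CBMEM_ID_ACPI_GNVS comes from the id table in cbmem.h:

#include <cbmem.h>
#include <stdint.h>

struct gnvs_blob {              /* hypothetical payload layout */
        uint32_t marker;
};

static struct gnvs_blob *get_or_create_gnvs(void)
{
        /* cbmem_add() is find-or-add: calling it again with the same id
         * returns the existing region instead of allocating a second one. */
        return cbmem_add(CBMEM_ID_ACPI_GNVS, sizeof(struct gnvs_blob));
}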
diff --git a/util/cbmem/cbmem.c b/util/cbmem/cbmem.c
index 042159192b..7007354b34 100644
--- a/util/cbmem/cbmem.c
+++ b/util/cbmem/cbmem.c
@@ -608,6 +608,10 @@ struct cbmem_entry {
         uint64_t size;
 } __attribute__((packed));
 
+struct cbmem_id_to_name {
+        uint32_t id;
+        const char *name;
+};
 static const struct cbmem_id_to_name cbmem_ids[] = { CBMEM_ID_TO_NAME_TABLE };
 
 void cbmem_print_entry(int n, uint32_t id, uint64_t base, uint64_t size)
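The utility now carries its own copy of the id-to-name mapping that the firmware side dropped. For reference, a sketch of how such a table is consumed; cbmem_id_name() is a hypothetical helper mirroring the lookup loop from the deleted in-firmware cbmem_print_entry() above:

static const char *cbmem_id_name(uint32_t id)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(cbmem_ids); i++)
                if (cbmem_ids[i].id == id)
                        return cbmem_ids[i].name;

        return NULL;    /* caller falls back to printing the raw id */
}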