From cf4a3f4a9781dab1e08aa2d0c937d4bd196e02f6 Mon Sep 17 00:00:00 2001
From: Aaron Durbin
Date: Tue, 26 Mar 2013 18:07:32 +0100
Subject: Revert "coreboot table: use memrange library"

This reverts commit 56075eaefcd7ef51464206166b24a0a47a59147f.

Change-Id: I8a37ce1f5ce36e4a120941ec264140abc9447ff5
Reviewed-on: http://review.coreboot.org/2915
Reviewed-by: Stefan Reinauer
Tested-by: build bot (Jenkins)
---
 src/lib/coreboot_table.c | 271 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 218 insertions(+), 53 deletions(-)

diff --git a/src/lib/coreboot_table.c b/src/lib/coreboot_table.c
index 840470b9bc..84eb5a65a9 100644
--- a/src/lib/coreboot_table.c
+++ b/src/lib/coreboot_table.c
@@ -29,7 +29,6 @@
 #include
 #include
 #include
-#include <memrange.h>
 #if CONFIG_USE_OPTION_TABLE
 #include
 #endif
@@ -357,7 +356,54 @@ static struct lb_forward *lb_forward(struct lb_header *header, struct lb_header
 	return forward;
 }

-static unsigned long lb_table_fini(struct lb_header *head)
+static void lb_memory_range(struct lb_memory *mem,
+	uint32_t type, uint64_t start, uint64_t size)
+{
+	int entries;
+	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
+	mem->map[entries].start = pack_lb64(start);
+	mem->map[entries].size = pack_lb64(size);
+	mem->map[entries].type = type;
+	mem->size += sizeof(mem->map[0]);
+}
+
+static void lb_reserve_table_memory(struct lb_header *head)
+{
+/* Dynamic cbmem has already reserved the memory where the coreboot tables
+ * reside. Therefore, there is nothing to fix up. */
+#if !CONFIG_DYNAMIC_CBMEM
+	struct lb_record *last_rec;
+	struct lb_memory *mem;
+	uint64_t start;
+	uint64_t end;
+	int i, entries;
+
+	last_rec = lb_last_record(head);
+	mem = get_lb_mem();
+	start = (unsigned long)head;
+	end = (unsigned long)last_rec;
+	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
+	/* Resize the right two memory areas so this table is in
+	 * a reserved area of memory. Everything has been carefully
+	 * set up so that is all we need to do.
+	 */
+	for (i = 0; i < entries; i++) {
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
+		/* Does this area need to be expanded? */
+		if (map_end == start) {
+			mem->map[i].size = pack_lb64(end - map_start);
+		}
+		/* Does this area need to be contracted? */
+		else if (map_start == start) {
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
+		}
+	}
+#endif
+}
+
+static unsigned long lb_table_fini(struct lb_header *head, int fixup)
 {
 	struct lb_record *rec, *first_rec;
 	rec = lb_last_record(head);
@@ -365,6 +411,9 @@ static unsigned long lb_table_fini(struct lb_header *head)
 		head->table_bytes += rec->size;
 	}

+	if (fixup)
+		lb_reserve_table_memory(head);
+
 	first_rec = lb_first_record(head);
 	head->table_checksum = compute_ip_checksum(first_rec, head->table_bytes);
 	head->header_checksum = 0;
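The two checksums touched in the hunk above are ones'-complement (IP-style) sums: lb_table_fini() checksums the record area first, then zeroes header_checksum and checksums the header itself, so a payload can verify the header and the records independently. Below is a minimal standalone sketch of that order of operations; ip_style_checksum() and struct toy_header are illustrative stand-ins, not coreboot's actual compute_ip_checksum() or struct lb_header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the real lb_header; only the checksum fields matter. */
struct toy_header {
	uint32_t table_bytes;
	uint16_t table_checksum;
	uint16_t header_checksum;
};

/* Classic ones'-complement checksum: sum 16-bit words, fold carries,
 * complement the result. */
static uint16_t ip_style_checksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] | ((uint32_t)p[i + 1] << 8);
	if (len & 1)			/* odd trailing byte */
		sum += p[len - 1];
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t records[24];
	struct toy_header head;

	memset(records, 0xab, sizeof(records));
	memset(&head, 0, sizeof(head));
	head.table_bytes = sizeof(records);

	/* Same order as lb_table_fini(): records first, then the header
	 * with header_checksum still zero. */
	head.table_checksum = ip_style_checksum(records, head.table_bytes);
	head.header_checksum = 0;
	head.header_checksum = ip_style_checksum(&head, sizeof(head));

	printf("table %#06x header %#06x\n",
	       head.table_checksum, head.header_checksum);
	return 0;
}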
@@ -375,53 +424,128 @@ static unsigned long lb_table_fini(struct lb_header *head)
 	return (unsigned long)rec + rec->size;
 }

-/* Routines to extract parts of the coreboot table or
- * information from the coreboot table after we have written it.
- * Currently get_lb_mem relies on a global; we can change the
- * implementation.
- */
-static struct lb_memory *mem_ranges = NULL;
-
-struct lb_memory *get_lb_mem(void)
+static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 {
-	return mem_ranges;
-}
+	int entries;
+	int i, j;
+	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
+
+	/* Sort the lb memory ranges */
+	for (i = 0; i < entries; i++) {
+		uint64_t entry_start = unpack_lb64(mem->map[i].start);
+		for (j = i + 1; j < entries; j++) {
+			uint64_t temp_start = unpack_lb64(mem->map[j].start);
+			if (temp_start < entry_start) {
+				struct lb_memory_range tmp;
+				tmp = mem->map[i];
+				mem->map[i] = mem->map[j];
+				mem->map[j] = tmp;
+			}
+		}
+	}

-/* This structure keeps track of the coreboot table memory ranges. */
-static struct memory_ranges lb_ranges;
+	/* Merge adjacent entries */
+	for (i = 0; (i + 1) < entries; i++) {
+		uint64_t start, end, nstart, nend;
+		if (mem->map[i].type != mem->map[i + 1].type) {
+			continue;
+		}
+		start = unpack_lb64(mem->map[i].start);
+		end = start + unpack_lb64(mem->map[i].size);
+		nstart = unpack_lb64(mem->map[i + 1].start);
+		nend = nstart + unpack_lb64(mem->map[i + 1].size);
+		if ((start <= nstart) && (end >= nstart)) {
+			if (start > nstart) {
+				start = nstart;
+			}
+			if (end < nend) {
+				end = nend;
+			}
+			/* Record the new region size */
+			mem->map[i].start = pack_lb64(start);
+			mem->map[i].size = pack_lb64(end - start);
+
+			/* Delete the entry I have merged with */
+			memmove(&mem->map[i + 1], &mem->map[i + 2],
+				((entries - i - 2) * sizeof(mem->map[0])));
+			mem->size -= sizeof(mem->map[0]);
+			entries -= 1;
+			/* See if I can merge with the next entry as well */
+			i -= 1;
+		}
+	}
+}
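lb_cleanup_memory_ranges() above restores the invariant the other helpers rely on: entries sorted by start address, with same-type ranges that touch or overlap merged into one. Here is a compact sketch of the same sort-then-merge pass over a plain array; struct range and cleanup_ranges() are hypothetical stand-ins for the packed lb_memory table.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct range { uint64_t start, size; uint32_t type; };

static void cleanup_ranges(struct range *map, int *entries)
{
	int i, j, n = *entries;

	/* Selection-style sort by start address, as in the patch. */
	for (i = 0; i < n; i++)
		for (j = i + 1; j < n; j++)
			if (map[j].start < map[i].start) {
				struct range tmp = map[i];
				map[i] = map[j];
				map[j] = tmp;
			}

	/* Merge a neighbor of the same type that overlaps or abuts. */
	for (i = 0; i + 1 < n; i++) {
		uint64_t end = map[i].start + map[i].size;
		uint64_t nend = map[i + 1].start + map[i + 1].size;

		if (map[i].type != map[i + 1].type || end < map[i + 1].start)
			continue;
		if (nend > end)
			end = nend;
		map[i].size = end - map[i].start;
		/* Close the gap left by the absorbed entry. */
		memmove(&map[i + 1], &map[i + 2],
			(n - i - 2) * sizeof(map[0]));
		n--;
		i--;	/* re-check the grown entry against its new neighbor */
	}
	*entries = n;
}

int main(void)
{
	struct range map[4] = {
		{ 0x1000, 0x1000, 1 },
		{ 0x0000, 0x1000, 1 },	/* out of order: sorts first */
		{ 0x2000, 0x1000, 1 },	/* abuts the previous two */
		{ 0x8000, 0x1000, 2 },	/* different type: kept apart */
	};
	int i, n = 4;

	cleanup_ranges(map, &n);
	for (i = 0; i < n; i++)	/* expect two entries: 0x0+0x3000, 0x8000+0x1000 */
		printf("%d: %#llx+%#llx type %u\n", i,
		       (unsigned long long)map[i].start,
		       (unsigned long long)map[i].size,
		       (unsigned)map[i].type);
	return 0;
}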
-static struct lb_memory *build_lb_mem(struct lb_header *head)
+static void lb_remove_memory_range(struct lb_memory *mem,
+	uint64_t start, uint64_t size)
 {
-	struct lb_memory *mem;
-
-	/* Record where the lb memory ranges will live */
-	mem = lb_memory(head);
-	mem_ranges = mem;
+	uint64_t end;
+	int entries;
+	int i;

-	/* Fill the memory map out. The order of operations is important in
-	 * that each overlapping range will take over the next. Therefore,
-	 * add cacheable resources as RAM, then add the reserved resources.
-	 */
-	memory_ranges_init(&lb_ranges, IORESOURCE_CACHEABLE,
-			IORESOURCE_CACHEABLE, LB_MEM_RAM);
-	memory_ranges_add_resources(&lb_ranges, IORESOURCE_RESERVE,
-			IORESOURCE_RESERVE, LB_MEM_RESERVED);
+	end = start + size;
+	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
+
+	/* Remove a reserved area from the memory map */
+	for (i = 0; i < entries; i++) {
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
+		if ((start <= map_start) && (end >= map_end)) {
+			/* Remove the completely covered range */
+			memmove(&mem->map[i], &mem->map[i + 1],
+				((entries - i - 1) * sizeof(mem->map[0])));
+			mem->size -= sizeof(mem->map[0]);
+			entries -= 1;
+			/* Since the index will disappear, revisit what will appear here */
+			i -= 1;
+		}
+		else if ((start > map_start) && (end < map_end)) {
+			/* Split the memory range */
+			memmove(&mem->map[i + 1], &mem->map[i],
+				((entries - i) * sizeof(mem->map[0])));
+			mem->size += sizeof(mem->map[0]);
+			entries += 1;
+			/* Update the first map entry */
+			mem->map[i].size = pack_lb64(start - map_start);
+			/* Update the second map entry */
+			mem->map[i + 1].start = pack_lb64(end);
+			mem->map[i + 1].size = pack_lb64(map_end - end);
+			/* Don't bother with this map entry again */
+			i += 1;
+		}
+		else if ((start <= map_start) && (end > map_start)) {
+			/* Shrink the start of the memory range */
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
+		}
+		else if ((start < map_end) && (start > map_start)) {
+			/* Shrink the end of the memory range */
+			mem->map[i].size = pack_lb64(start - map_start);
+		}
+	}
+}

-	return mem;
+void lb_add_memory_range(struct lb_memory *mem,
+	uint32_t type, uint64_t start, uint64_t size)
+{
+	lb_remove_memory_range(mem, start, size);
+	lb_memory_range(mem, type, start, size);
+	lb_cleanup_memory_ranges(mem);
 }

-static void commit_lb_memory(struct lb_memory *mem)
+static void lb_dump_memory_ranges(struct lb_memory *mem)
 {
-	struct memory_range *r;
-	struct lb_memory_range *lb_r;
+	int entries;
 	int i;
+	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);

-	lb_r = &mem->map[0];
-	i = 0;
-
-	memory_ranges_each_entry(r, &lb_ranges) {
+	printk(BIOS_DEBUG, "coreboot memory table:\n");
+	for (i = 0; i < entries; i++) {
+		uint64_t entry_start = unpack_lb64(mem->map[i].start);
+		uint64_t entry_size = unpack_lb64(mem->map[i].size);
 		const char *entry_type;

-		switch (memory_range_tag(r)) {
+		switch (mem->map[i].type) {
 		case LB_MEM_RAM: entry_type="RAM"; break;
 		case LB_MEM_RESERVED: entry_type="RESERVED"; break;
 		case LB_MEM_ACPI: entry_type="ACPI"; break;
@@ -433,25 +557,58 @@
 		}

 		printk(BIOS_DEBUG, "%2d. %016llx-%016llx: %s\n",
-			i, memory_range_base(r), memory_range_end(r)-1,
-			entry_type);
-
-		lb_r->start = pack_lb64(memory_range_base(r));
-		lb_r->size = pack_lb64(memory_range_size(r));
-		lb_r->type = memory_range_tag(r);
+			i, entry_start, entry_start+entry_size-1, entry_type);

-		i++;
-		lb_r++;
-		mem->size += sizeof(struct lb_memory_range);
 	}
 }
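lb_remove_memory_range() above distinguishes four overlap cases when punching a hole for a reserved region: the entry is fully covered (delete it), the hole is strictly inside (split it in two), the hole clips the head, or it clips the tail. The sketch below shows just that case analysis on a single range; punch_hole() and its return convention are hypothetical, while the real code edits the packed table in place with memmove().

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; };

/* Punch hole [start, start+size) out of *r. Returns the number of
 * surviving pieces (0, 1 or 2); a second piece goes to *tail. */
static int punch_hole(struct range *r, struct range *tail,
		      uint64_t start, uint64_t size)
{
	uint64_t end = start + size;
	uint64_t r_end = r->start + r->size;

	if (start <= r->start && end >= r_end)
		return 0;			/* fully covered: delete */
	if (start > r->start && end < r_end) {	/* strictly inside: split */
		tail->start = end;
		tail->size = r_end - end;
		r->size = start - r->start;
		return 2;
	}
	if (start <= r->start && end > r->start) {	/* clip the head */
		r->start = end;
		r->size = r_end - end;
		return 1;
	}
	if (start < r_end && start > r->start)	/* clip the tail */
		r->size = start - r->start;
	return 1;				/* clipped or untouched */
}

int main(void)
{
	struct range r = { 0x1000, 0x4000 }, tail;
	int n = punch_hole(&r, &tail, 0x2000, 0x1000);

	/* Expect a split: 0x1000+0x1000 and 0x3000+0x2000. */
	printf("pieces=%d first=%#llx+%#llx", n,
	       (unsigned long long)r.start, (unsigned long long)r.size);
	if (n == 2)
		printf(" second=%#llx+%#llx",
		       (unsigned long long)tail.start,
		       (unsigned long long)tail.size);
	printf("\n");
	return 0;
}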
-void lb_add_memory_range(struct lb_memory *mem,
-	uint32_t type, uint64_t start, uint64_t size)
+/* Routines to extract parts of the coreboot table or
+ * information from the coreboot table after we have written it.
+ * Currently get_lb_mem relies on a global; we can change the
+ * implementation.
+ */
+static struct lb_memory *mem_ranges = NULL;
+
+struct lb_memory *get_lb_mem(void)
 {
-	insert_memory_range(&lb_ranges, start, size, type);
+	return mem_ranges;
 }

+static void build_lb_mem_range(void *gp, struct device *dev, struct resource *res)
+{
+	struct lb_memory *mem = gp;
+	lb_memory_range(mem, LB_MEM_RAM, res->base, res->size);
+}
+
+static struct lb_memory *build_lb_mem(struct lb_header *head)
+{
+	struct lb_memory *mem;
+
+	/* Record where the lb memory ranges will live */
+	mem = lb_memory(head);
+	mem_ranges = mem;
+
+	/* Build the raw table of memory */
+	search_global_resources(
+		IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
+		build_lb_mem_range, mem);
+	lb_cleanup_memory_ranges(mem);
+	return mem;
+}
+
+static void lb_add_rsvd_range(void *gp, struct device *dev, struct resource *res)
+{
+	struct lb_memory *mem = gp;
+	lb_add_memory_range(mem, LB_MEM_RESERVED, res->base, res->size);
+}
+
+static void add_lb_reserved(struct lb_memory *mem)
+{
+	/* Add reserved ranges */
+	search_global_resources(
+		IORESOURCE_MEM | IORESOURCE_RESERVE, IORESOURCE_MEM | IORESOURCE_RESERVE,
+		lb_add_rsvd_range, mem);
+}

 unsigned long write_coreboot_table(
 	unsigned long low_table_start, unsigned long low_table_end,
@@ -466,7 +623,7 @@ unsigned long write_coreboot_table(

 		head = lb_table_init(low_table_end);
 		lb_forward(head, (struct lb_header*)rom_table_end);
-		low_table_end = (unsigned long) lb_table_fini(head);
+		low_table_end = (unsigned long) lb_table_fini(head, 0);
 		printk(BIOS_DEBUG, "Table forward entry ends at 0x%08lx.\n",
 			low_table_end);
 		low_table_end = ALIGN(low_table_end, 4096);
@@ -525,9 +682,17 @@ unsigned long write_coreboot_table(
 			high_tables_base, high_tables_size);
 #endif /* CONFIG_DYNAMIC_CBMEM */

-	/* No other memory areas can be added after the memory table has been
-	 * committed as the entries won't show up in the serialize mem table. */
-	commit_lb_memory(mem);
+	/* Add reserved regions */
+	add_lb_reserved(mem);
+
+	lb_dump_memory_ranges(mem);
+
+	/* Note:
+	 * I assume that there is always memory immediately after
+	 * the low_table_end. This means that after I set up the coreboot
+	 * table, I can trivially fix up the reserved memory ranges to hold
+	 * the correct size of the coreboot table.
+	 */

 	/* Record our motherboard */
 	lb_mainboard(head);
@@ -556,5 +721,5 @@ unsigned long write_coreboot_table(
 	add_cbmem_pointers(head);

 	/* Remember where my valid memory ranges are */
-	return lb_table_fini(head);
+	return lb_table_fini(head, 1);
 }
--
cgit v1.2.3
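Throughout the patch, 64-bit addresses go into the table through pack_lb64() and come back out through unpack_lb64(). The point of the split representation is that the serialized table then carries no 64-bit alignment requirement for 32-bit consumers. Below is a sketch of the pair, assuming coreboot's two-uint32 lb_uint64 layout; the struct and helpers are re-created locally here rather than taken from the real coreboot_tables.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: a 64-bit value stored as two 32-bit halves. */
struct lb_uint64 {
	uint32_t lo;
	uint32_t hi;
};

static struct lb_uint64 pack_lb64(uint64_t value)
{
	struct lb_uint64 result;
	result.lo = (uint32_t)(value & 0xffffffff);
	result.hi = (uint32_t)(value >> 32);
	return result;
}

static uint64_t unpack_lb64(struct lb_uint64 value)
{
	return ((uint64_t)value.hi << 32) | value.lo;
}

int main(void)
{
	uint64_t addr = 0x123456789aULL;
	struct lb_uint64 packed = pack_lb64(addr);

	/* Round trip: unpack_lb64(pack_lb64(x)) == x. */
	printf("lo=%#x hi=%#x roundtrip=%#llx\n",
	       (unsigned)packed.lo, (unsigned)packed.hi,
	       (unsigned long long)unpack_lb64(packed));
	return 0;
}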