path: root/src/lib/memrange.c
author     Nico Huber <nico.h@gmx.de>          2020-05-25 00:03:14 +0200
committer  Martin L Roth <gaumless@gmail.com>  2022-09-04 16:35:22 +0000
commit     526c64249a0a0c7b83d1914b88d4f938ede7dc1c (patch)
tree       639322f2e32b0c11e629cc329e16c5c681c54781 /src/lib/memrange.c
parent     38688519cf4bca485c41b48ac5c0cc334daa8ba7 (diff)
allocator_v4: Introduce RESOURCE_ALLOCATION_TOP_DOWN
Add an option to resource allocator v4 that restores the top-down
allocation approach at the domain level. This makes it easier to handle
64-bit resources natively. With the top-down approach, resources that
can be placed either above or below 4G would be placed above, to save
precious space below the 4G boundary.

Change-Id: Iaf463d3e6b37d52e46761d8e210034fded58a8a4
Signed-off-by: Nico Huber <nico.h@gmx.de>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/41957
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Reviewed-by: Subrata Banik <subratabanik@google.com>
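To make the placement difference concrete, here is a minimal sketch (illustrative C, not allocator v4 code; the function and parameter names are invented for this example): bottom-up carves a window from the low end of a free range, top-down from the high end, which is what lets 64-bit capable resources land above 4G and leaves the low window for resources with a 32-bit limit.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: carve a size-byte window out of the free range
 * [free_base, free_end). Top-down takes it from the high end, bottom-up
 * from the low end -- the same contrast the diff below implements with
 * ALIGN_DOWN(range_entry_end(r) - size, ...) vs. ALIGN_UP(r->begin, ...). */
static uint64_t pick_window_base(uint64_t free_base, uint64_t free_end,
                                 uint64_t size, bool top_down)
{
        return top_down ? free_end - size : free_base;
}

For a free range that spans the 4G boundary, the top-down result lands above 4G (as long as the window fits there), so the scarce space below 4G stays available.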
Diffstat (limited to 'src/lib/memrange.c')
-rw-r--r--   src/lib/memrange.c   36
1 file changed, 23 insertions, 13 deletions
diff --git a/src/lib/memrange.c b/src/lib/memrange.c
index 39f502caa6..b68b86e2f8 100644
--- a/src/lib/memrange.c
+++ b/src/lib/memrange.c
@@ -378,11 +378,11 @@ struct range_entry *memranges_next_entry(struct memranges *ranges,
/* Find a range entry that satisfies the given constraints to fit a hole that matches the
* required alignment, is big enough, does not exceed the limit and has a matching tag. */
-static const struct range_entry *memranges_find_entry(struct memranges *ranges,
- resource_t limit, resource_t size,
- unsigned char align, unsigned long tag)
+static const struct range_entry *
+memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
+ unsigned char align, unsigned long tag, bool last)
{
- const struct range_entry *r;
+ const struct range_entry *r, *last_entry = NULL;
resource_t base, end;
if (size == 0)
@@ -407,25 +407,35 @@ static const struct range_entry *memranges_find_entry(struct memranges *ranges,
if (end > limit)
break;
- return r;
+ if (!last)
+ return r;
+
+ last_entry = r;
}
- return NULL;
+ return last_entry;
}
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
- unsigned char align, unsigned long tag, resource_t *stolen_base)
+ unsigned char align, unsigned long tag, resource_t *stolen_base,
+ bool from_top)
{
- resource_t base;
- const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);
+ const struct range_entry *r;
+ r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
if (r == NULL)
return false;
- base = ALIGN_UP(r->begin, POWER_OF_2(align));
-
- memranges_create_hole(ranges, base, size);
- *stolen_base = base;
+ if (from_top) {
+ /* Ensure we're within the range, even aligned down.
+ Proof is simple: If ALIGN_UP(r->begin) would be
+ higher, the stolen range wouldn't fit. */
+ assert(r->begin <= ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align)));
+ *stolen_base = ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align));
+ } else {
+ *stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
+ }
+ memranges_create_hole(ranges, *stolen_base, size);
return true;
}
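For context, a hedged usage sketch of the new memranges_steal() signature (the caller, the tag argument, and the 1 MiB size/alignment are illustrative; <memrange.h> is assumed to declare struct memranges, resource_t and memranges_steal() as in coreboot):

#include <memrange.h>

/* Illustrative caller: steal a 1 MiB, 1 MiB-aligned hole from the top of
 * the matching ranges. align is a log2 value, so 20 maps to
 * POWER_OF_2(20) == 1 MiB, matching the use in memranges_steal() above. */
static bool steal_from_top_example(struct memranges *ranges, unsigned long tag,
                                   resource_t *stolen_base)
{
        const resource_t limit = ~(resource_t)0;  /* no upper bound: 64-bit capable */
        const resource_t size = 1ULL << 20;       /* 1 MiB */

        /* from_top = true selects the last (highest) matching range entry
         * and aligns the stolen base down from that range's end. */
        return memranges_steal(ranges, limit, size, 20, tag, stolen_base, true);
}

With from_top set, the hole is carved from the highest matching range, so a 64-bit capable window ends up above 4G whenever such a range exists, matching the intent described in the commit message.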