author    Furquan Shaikh <furquan@google.com>    2020-03-24 14:56:38 -0700
committer Patrick Georgi <pgeorgi@google.com>    2020-03-30 08:44:53 +0000
commit    1908340b6903a41750226db90d6dbd39eb527c99 (patch)
tree      f88dc4be483f866919c6df2df5ff058ec4cebbe1 /src/include
parent    f79f8b4e33a4da257dfacce0eab582b4638791fc (diff)
memranges: Change align attribute to be log2 of required alignment
This change updates the align attribute of memranges to be represented as
log2 of the required alignment. This makes it consistent with how alignment
is stored in struct resource as well. Additionally, since memranges only
allow power-of-2 alignments, this change makes it possible to drop the
runtime checks (and hence the failure cases) for non-power-of-2 alignments.
This change also updates the type of align to unsigned char.

BUG=b:149186922

Signed-off-by: Furquan Shaikh <furquan@google.com>
Change-Id: Ie4d3868cdff55b2c7908b9b3ccd5f30a5288e62f
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39810
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
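For illustration only (not part of the patch): a minimal caller-side sketch of the new contract, in which the alignment argument is the log2 of the byte alignment. The function name setup_ranges and its parameters are hypothetical; 12 is simply log2(4 KiB).

#include <memrange.h>

/* Before this change the caller passed a byte count that had to be a
 * power of 2; after it, the caller passes log2 of that count.
 *
 *   old: memranges_init_empty_with_alignment(ranges, free, num_free, 4 * KiB);
 *   new: memranges_init_empty_with_alignment(ranges, free, num_free, 12);
 *
 * 2^12 == 4096 == 4 KiB, so both calls request the same alignment. */
static void setup_ranges(struct memranges *ranges,
			 struct range_entry *free, size_t num_free)
{
	/* align is log2: 12 -> base/end addresses aligned to 4 KiB. */
	memranges_init_empty_with_alignment(ranges, free, num_free, 12);
}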
Diffstat (limited to 'src/include')
-rw-r--r--  src/include/memrange.h  27
1 file changed, 13 insertions, 14 deletions
diff --git a/src/include/memrange.h b/src/include/memrange.h
index cfd29e7079..dcab791b29 100644
--- a/src/include/memrange.h
+++ b/src/include/memrange.h
@@ -24,8 +24,8 @@ struct memranges {
/* coreboot doesn't have a free() function. Therefore, keep a cache of
* free'd entries. */
struct range_entry *free_list;
- /* Alignment for base and end addresses of the range. (Must be power of 2). */
- size_t align;
+ /* Alignment(log 2) for base and end addresses of the range. */
+ unsigned char align;
};
/* Each region within a memranges structure is represented by a
@@ -96,29 +96,29 @@ static inline bool memranges_is_empty(struct memranges *ranges)
/* Initialize memranges structure providing an optional array of range_entry
* to use as the free list. Additionally, it accepts an align parameter that
- * determines the alignment of addresses. (Alignment must be a power of 2). */
+ * represents the required alignment(log 2) of addresses. */
void memranges_init_empty_with_alignment(struct memranges *ranges,
struct range_entry *free,
- size_t num_free, size_t align);
+ size_t num_free, unsigned char align);
/* Initialize and fill a memranges structure according to the
* mask and match type for all memory resources. Tag each entry with the
* specified type. Additionally, it accepts an align parameter that
- * determines the alignment of addresses. (Alignment must be a power of 2). */
+ * represents the required alignment(log 2) of addresses. */
void memranges_init_with_alignment(struct memranges *ranges,
unsigned long mask, unsigned long match,
- unsigned long tag, size_t align);
+ unsigned long tag, unsigned char align);
/* Initialize memranges structure providing an optional array of range_entry
- * to use as the free list. Addresses are default aligned to 4KiB. */
+ * to use as the free list. Addresses are default aligned to 4KiB(2^12). */
#define memranges_init_empty(__ranges, __free, __num_free) \
- memranges_init_empty_with_alignment(__ranges, __free, __num_free, 4 * KiB)
+ memranges_init_empty_with_alignment(__ranges, __free, __num_free, 12);
/* Initialize and fill a memranges structure according to the
* mask and match type for all memory resources. Tag each entry with the
- * specified type. Addresses are default aligned to 4KiB. */
+ * specified type. Addresses are default aligned to 4KiB(2^12). */
#define memranges_init(__ranges, __mask, __match, __tag) \
- memranges_init_with_alignment(__ranges, __mask, __match, __tag, 4 * KiB)
+ memranges_init_with_alignment(__ranges, __mask, __match, __tag, 12);
/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges);
@@ -175,14 +175,13 @@ struct range_entry *memranges_next_entry(struct memranges *ranges,
/* Steals memory from the available list in given ranges as per the constraints:
* limit = Upper bound for the memory range to steal.
* size = Requested size for the stolen memory.
- * align = Alignment requirements for the starting address of the stolen memory.
- * (Alignment must be a power of 2).
+ * align = Required alignment(log 2) for the starting address of the stolen memory.
* tag = Use a range that matches the given tag.
*
* If the constraints can be satisfied, this function creates a hole in the memrange,
* writes the base address of that hole to stolen_base and returns true. Otherwise it returns
* false. */
-bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size, size_t align,
- unsigned long tag, resource_t *stolen_base);
+bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
+ unsigned char align, unsigned long tag, resource_t *stolen_base);
#endif /* MEMRANGE_H_ */
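
Usage sketch (not from the patch), assuming coreboot's <commonlib/helpers.h> size macros are available: a hypothetical helper that steals a 1 MiB, 1 MiB-aligned block below 4 GiB, passing the alignment as log2 (20 == log2(1 MiB)). The name steal_dma_buffer and the tag parameter are illustrative only.

#include <commonlib/helpers.h>
#include <memrange.h>

static bool steal_dma_buffer(struct memranges *ranges, unsigned long tag,
			     resource_t *stolen_base)
{
	/* limit: upper bound for the stolen range (here just below 4 GiB).
	 * align: log2 of the required alignment, so 20 -> 1 MiB. */
	return memranges_steal(ranges, (resource_t)4ULL * GiB - 1,
			       1 * MiB, 20, tag, stolen_base);
}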