path: root/src/commonlib
author    Raul E Rangel <rrangel@chromium.org>    2021-07-23 16:43:18 -0600
committer Patrick Georgi <pgeorgi@google.com>     2021-11-04 10:33:52 +0000
commit    5ac82dcc20678629f2dd5497d9b657bcfa7acdf2 (patch)
tree      2e3b37072907c6683f732f22ecfe8f50bebed378 /src/commonlib
parent    533fc4dfb155bb45e8da279e1b85b676e3f6c58c (diff)
commonlib/mem_pool: Allow configuring the alignment
AMD platforms require the destination to be 64 byte aligned in order to
use the SPI DMA controller. This is enforced by the destination address
register because the first 6 bits are marked as reserved. This change
adds an option to the mem_pool so the alignment can be configured.

BUG=b:179699789
TEST=Boot guybrush to OS

Signed-off-by: Raul E Rangel <rrangel@chromium.org>
Change-Id: I8d77ffe4411f86c54450305320c9f52ab41a3075
Reviewed-on: https://review.coreboot.org/c/coreboot/+/56580
Reviewed-by: Karthik Ramasubramanian <kramasub@google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src/commonlib')
-rw-r--r--  src/commonlib/include/commonlib/mem_pool.h  16
-rw-r--r--  src/commonlib/mem_pool.c                      7
2 files changed, 16 insertions(+), 7 deletions(-)
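
For context, here is a minimal usage sketch of the new interface; it is not part of this change, and the buffer name and size are illustrative. It declares a pool whose backing buffer meets the 64-byte SPI DMA constraint via the updated MEM_POOL_INIT():

#include <commonlib/mem_pool.h>

/* Hypothetical backing store; the aligned attribute keeps the buffer start
 * compatible with the alignment passed to the pool. */
static uint8_t dma_buffer[4096] __attribute__((aligned(64)));

static struct mem_pool dma_pool =
	MEM_POOL_INIT(dma_buffer, sizeof(dma_buffer), 64);

Because mem_pool_alloc() rounds every request up to the configured alignment (see the mem_pool.c hunk below), each block handed out by such a pool starts on a 64-byte boundary.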
diff --git a/src/commonlib/include/commonlib/mem_pool.h b/src/commonlib/include/commonlib/mem_pool.h
index 6c85397314..42b5d1ed96 100644
--- a/src/commonlib/include/commonlib/mem_pool.h
+++ b/src/commonlib/include/commonlib/mem_pool.h
@@ -3,6 +3,7 @@
#ifndef _MEM_POOL_H_
#define _MEM_POOL_H_
+#include <assert.h>
#include <stddef.h>
#include <stdint.h>
@@ -16,23 +17,23 @@
* were chosen to optimize for the CBFS cache case which may need two buffers
* to map a single compressed file, and will free them in reverse order.)
*
- * The memory returned by allocations are at least 8 byte aligned. Note
- * that this requires the backing buffer to start on at least an 8 byte
- * alignment.
+ * You must ensure the backing buffer is 'alignment' aligned.
*/
struct mem_pool {
uint8_t *buf;
size_t size;
+ size_t alignment;
uint8_t *last_alloc;
uint8_t *second_to_last_alloc;
size_t free_offset;
};
-#define MEM_POOL_INIT(buf_, size_) \
+#define MEM_POOL_INIT(buf_, size_, alignment_) \
{ \
.buf = (buf_), \
.size = (size_), \
+ .alignment = (alignment_), \
.last_alloc = NULL, \
.second_to_last_alloc = NULL, \
.free_offset = 0, \
@@ -46,10 +47,15 @@ static inline void mem_pool_reset(struct mem_pool *mp)
}
/* Initialize a memory pool. */
-static inline void mem_pool_init(struct mem_pool *mp, void *buf, size_t sz)
+static inline void mem_pool_init(struct mem_pool *mp, void *buf, size_t sz,
+ size_t alignment)
{
+ assert(alignment);
+ assert((uintptr_t)buf % alignment == 0);
+
mp->buf = buf;
mp->size = sz;
+ mp->alignment = alignment;
mem_pool_reset(mp);
}
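
As a hedged sketch of the runtime path (the identifiers below are made up for illustration), mem_pool_init() now rejects a zero alignment and a misaligned buffer through the new asserts, and allocations inherit the configured alignment:

#include <commonlib/mem_pool.h>

static uint8_t backing[2048] __attribute__((aligned(64)));
static struct mem_pool pool;

static void setup_pool(void)
{
	/* Passes both asserts: the alignment is non-zero and 'backing'
	 * starts on a 64-byte boundary. */
	mem_pool_init(&pool, backing, sizeof(backing), 64);

	/* Each allocation then begins 64-byte aligned, e.g. a destination
	 * buffer handed to the SPI DMA controller. */
	void *dst = mem_pool_alloc(&pool, 100);
	(void)dst;
}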
diff --git a/src/commonlib/mem_pool.c b/src/commonlib/mem_pool.c
index c300c65d6e..d82ab18bd7 100644
--- a/src/commonlib/mem_pool.c
+++ b/src/commonlib/mem_pool.c
@@ -7,8 +7,11 @@ void *mem_pool_alloc(struct mem_pool *mp, size_t sz)
{
void *p;
- /* Make all allocations be at least 8 byte aligned. */
- sz = ALIGN_UP(sz, 8);
+ if (mp->alignment == 0)
+ return NULL;
+
+ /* We assume that mp->buf started mp->alignment aligned */
+ sz = ALIGN_UP(sz, mp->alignment);
/* Determine if any space available. */
if ((mp->size - mp->free_offset) < sz)
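
To illustrate the rounding above: with an alignment of 64, a 100-byte request consumes 128 bytes of the pool, so the next allocation also starts aligned as long as the backing buffer did. The following standalone sketch uses a simplified stand-in for coreboot's ALIGN_UP() (the real macro lives in commonlib's helpers; this version assumes a power-of-two alignment):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for ALIGN_UP(); assumes 'a' is a power of two. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((size_t)(a) - 1))

int main(void)
{
	printf("%zu\n", (size_t)ALIGN_UP(100, 64)); /* prints 128 */
	printf("%zu\n", (size_t)ALIGN_UP(128, 64)); /* prints 128 */
	return 0;
}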