summaryrefslogtreecommitdiffstats
path: root/common/tlsf.c
diff options
context:
space:
mode:
authorAhmad Fatoum <a.fatoum@pengutronix.de>2022-05-23 08:27:56 +0200
committerSascha Hauer <s.hauer@pengutronix.de>2022-05-24 09:03:08 +0200
commit870a0ef748ef3c4644a4072acf13e9f7f2e052f5 (patch)
tree7b78df078cd97ee0bafbf41a2731553d31b339d2 /common/tlsf.c
parentd08f27e2ca9535471b547e1928a3199983aac712 (diff)
downloadbarebox-870a0ef748ef3c4644a4072acf13e9f7f2e052f5.tar.gz
barebox-870a0ef748ef3c4644a4072acf13e9f7f2e052f5.tar.xz
tlsf: fix internal overflow trying to allocate big buffers
The function adjust_request_size() has an unhandled failure mode: If aligning a buffer up overflows SIZE_MAX, it will compute a way too short buffer instead of propagating an error. Fix this by returning 0 in this case and checking for 0 wherever the function is called. 0 is a safe choice for an error code, because the function returns at least block_size_min on success and 0 was already an error code (that was just never handled). Reported-by: Jonas Martin <j.martin@pengutronix.de> Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de> Link: https://lore.barebox.org/20220523062756.774153-2-a.fatoum@pengutronix.de Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'common/tlsf.c')
-rw-r--r--common/tlsf.c19
1 files changed, 16 insertions, 3 deletions
diff --git a/common/tlsf.c b/common/tlsf.c
index 520cce496e..3ca58e3abb 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -313,7 +313,7 @@ static size_t adjust_request_size(size_t size, size_t align)
const size_t aligned = align_up(size, align);
/* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
- if (aligned < block_size_max)
+ if (aligned >= size && aligned < block_size_max)
{
adjust = tlsf_max(aligned, block_size_min);
}
@@ -942,7 +942,12 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
- block_header_t* block = block_locate_free(control, adjust);
+ block_header_t* block;
+
+ if (!adjust)
+ return NULL;
+
+ block = block_locate_free(control, adjust);
return block_prepare_used(control, block, adjust, size);
}
@@ -969,7 +974,12 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
*/
const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
- block_header_t* block = block_locate_free(control, aligned_size);
+ block_header_t* block;
+
+ if (!adjust || !size_with_gap)
+ return NULL;
+
+ block = block_locate_free(control, aligned_size);
/* This can't be a static assert. */
tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
@@ -1059,6 +1069,9 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
tlsf_assert(!block_is_free(block) && "block already marked as free");
+ if (!adjust)
+ return NULL;
+
/*
** If the next block is used, or when combined with the current
** block, does not offer enough space, we must reallocate and copy.