Diffstat (limited to 'common/tlsf.c')
-rw-r--r--  common/tlsf.c  64
1 file changed, 40 insertions, 24 deletions
diff --git a/common/tlsf.c b/common/tlsf.c
index 4247a9d3c7..ba2ed367c0 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -28,13 +30,8 @@ enum tlsf_public
/* Private constants: do not modify. */
enum tlsf_private
{
-#if defined (TLSF_64BIT)
/* All allocation sizes and addresses are aligned to 8 bytes. */
ALIGN_SIZE_LOG2 = 3,
-#else
- /* All allocation sizes and addresses are aligned to 4 bytes. */
- ALIGN_SIZE_LOG2 = 2,
-#endif
ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
/*
@@ -99,6 +96,8 @@ tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
+tlsf_static_assert(ALIGN_SIZE >= CONFIG_MALLOC_ALIGNMENT);
+
/*
** Data structures and associated constants.
*/
@@ -120,6 +119,7 @@ typedef struct block_header_t
/* The size of this block, excluding the block header. */
size_t size;
+ u32 : BYTES_TO_BITS(ALIGN_SIZE - sizeof(size_t));
/* Next and previous free blocks. */
struct block_header_t* next_free;
@@ -132,28 +132,29 @@ typedef struct block_header_t
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
-static const size_t block_header_free_bit = 1 << 0;
-static const size_t block_header_prev_free_bit = 1 << 1;
+#define block_header_free_bit (1 << 0)
+#define block_header_prev_free_bit (1 << 1)
/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
-static const size_t block_header_overhead = sizeof(size_t);
+#define block_header_shift offsetof(block_header_t, size)
+#define block_header_overhead ALIGN_SIZE
/* User data starts directly after the size field in a used block. */
-static const size_t block_start_offset =
- offsetof(block_header_t, size) + sizeof(size_t);
+#define block_start_offset (block_header_shift + block_header_overhead)
/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
-static const size_t block_size_min =
- sizeof(block_header_t) - sizeof(block_header_t*);
-static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;
+#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
+#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX)
+tlsf_static_assert(block_size_min % ALIGN_SIZE == 0);
+tlsf_static_assert(block_size_max % ALIGN_SIZE == 0);
/* The TLSF control structure. */
typedef struct control_t
@@ -164,10 +165,12 @@ typedef struct control_t
/* Bitmaps for free lists. */
unsigned int fl_bitmap;
unsigned int sl_bitmap[FL_INDEX_COUNT];
+ u32 : BYTES_TO_BITS(ALIGN_SIZE - sizeof(size_t));
/* Head of free lists. */
block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;
+tlsf_static_assert(sizeof(control_t) % ALIGN_SIZE == 0);
/* A type used for casting when doing pointer arithmetic. */
typedef ptrdiff_t tlsfptr_t;
@@ -251,7 +254,7 @@ static block_header_t* block_prev(const block_header_t* block)
static block_header_t* block_next(const block_header_t* block)
{
block_header_t* next = offset_to_block(block_to_ptr(block),
- block_size(block) - block_header_overhead);
+ block_size(block) - block_header_shift);
tlsf_assert(!block_is_last(block));
return next;
}
@@ -311,7 +314,7 @@ static size_t adjust_request_size(size_t size, size_t align)
const size_t aligned = align_up(size, align);
/* aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
- if (aligned < block_size_max)
+ if (aligned >= size && aligned < block_size_max)
{
adjust = tlsf_max(aligned, block_size_min);
}
@@ -462,7 +465,7 @@ static block_header_t* block_split(block_header_t* block, size_t size)
{
/* Calculate the amount of space left in the remaining block. */
block_header_t* remaining =
- offset_to_block(block_to_ptr(block), size - block_header_overhead);
+ offset_to_block(block_to_ptr(block), size - block_header_shift);
const size_t remain_size = block_size(block) - (size + block_header_overhead);
@@ -728,7 +731,7 @@ void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
tlsf_walker pool_walker = walker ? walker : default_walker;
block_header_t* block =
- offset_to_block(pool, -(int)block_header_overhead);
+ offset_to_block(pool, -(int)block_header_shift);
while (block && !block_is_last(block))
{
@@ -763,11 +766,11 @@ int tlsf_check_pool(pool_t pool)
/*
** Size of the TLSF structures in a given memory block passed to
-** tlsf_create, equal to the size of a control_t
+** tlsf_create, equal to the aligned size of a control_t
*/
size_t tlsf_size(void)
{
- return sizeof(control_t);
+ return align_up(sizeof(control_t), ALIGN_SIZE);
}
size_t tlsf_align_size(void)
@@ -834,7 +837,7 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
** so that the prev_phys_block field falls outside of the pool -
** it will never be used.
*/
- block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
+ block = offset_to_block(mem, -(tlsfptr_t)block_header_shift);
block_set_size(block, pool_bytes);
block_set_free(block);
block_set_prev_used(block);
@@ -852,7 +855,7 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
control_t* control = tlsf_cast(control_t*, tlsf);
- block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
+ block_header_t* block = offset_to_block(pool, -(int)block_header_shift);
int fl = 0, sl = 0;
@@ -940,7 +943,12 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
- block_header_t* block = block_locate_free(control, adjust);
+ block_header_t* block;
+
+ if (!adjust)
+ return NULL;
+
+ block = block_locate_free(control, adjust);
return block_prepare_used(control, block, adjust, size);
}
@@ -967,10 +975,15 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
*/
const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
- block_header_t* block = block_locate_free(control, aligned_size);
+ block_header_t* block;
+
+ if (!adjust || !size_with_gap)
+ return NULL;
+
+ block = block_locate_free(control, aligned_size);
/* This can't be a static assert. */
- tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
+ tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_shift);
if (block)
{
@@ -1057,6 +1070,9 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
tlsf_assert(!block_is_free(block) && "block already marked as free");
+ if (!adjust)
+ return NULL;
+
/*
** If the next block is used, or when combined with the current
** block, does not offer enough space, we must reallocate and copy.
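
The shift/overhead split is the heart of the patch, so a standalone sketch may help. It is not part of the commit; it assumes a 64-bit target where ALIGN_SIZE is 8, spells out a stand-in for barebox's BYTES_TO_BITS macro, and uses uint32_t where the patch uses the barebox u32 typedef:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_SIZE		8		/* assumed: 8-byte allocation alignment */
#define BYTES_TO_BITS(n)	((n) * 8)	/* stand-in for the barebox macro */

static_assert(sizeof(void *) == 8, "sketch assumes a 64-bit target");

typedef struct block_header_t {
	struct block_header_t *prev_phys_block;	/* stored inside the previous block */
	size_t size;				/* low two bits hold the free flags */
	/* Zero-width here, since size_t already fills the 8-byte slot; on a
	 * 32-bit target it widens to 32 bits so the fields that follow stay
	 * ALIGN_SIZE-aligned. */
	uint32_t : BYTES_TO_BITS(ALIGN_SIZE - sizeof(size_t));
	struct block_header_t *next_free;	/* valid only while the block is free */
	struct block_header_t *prev_free;
} block_header_t;

/* The macros the patch introduces: the size field sits block_header_shift
 * bytes into the header, a used block pays block_header_overhead bytes
 * beyond its payload, and the payload begins at their sum. */
#define block_header_shift	offsetof(block_header_t, size)
#define block_header_overhead	ALIGN_SIZE
#define block_start_offset	(block_header_shift + block_header_overhead)

int main(void)
{
	/* On this target: shift == 8, overhead == 8, payload at offset 16. */
	assert(block_header_shift == 8);
	assert(block_start_offset == 16);
	assert(sizeof(block_header_t) % ALIGN_SIZE == 0);
	return 0;
}

This also shows why the hunks in block_next(), block_split(), tlsf_walk_pool(), tlsf_add_pool() and tlsf_remove_pool() switch from block_header_overhead to block_header_shift: those call sites step between a payload pointer and the header that precedes it, which is a question of field position (shift), not of accounted size (overhead). The two only happened to coincide while block_header_overhead was sizeof(size_t).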
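The `aligned >= size` guard added to adjust_request_size() deserves a note: align_up() rounds up with unsigned arithmetic, so a request near SIZE_MAX wraps around to a small value. The old check `aligned < block_size_max` then passes, adjust becomes block_size_min via tlsf_max(), and the caller receives a tiny block for an enormous request. A minimal demonstration, assuming align_up() has the usual round-up-to-power-of-two definition used in tlsf.c:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match tlsf.c's helper: round x up to a power-of-two align. */
static size_t align_up(size_t x, size_t align)
{
	return (x + (align - 1)) & ~(align - 1);
}

int main(void)
{
	const size_t huge = SIZE_MAX - 2;	/* e.g. a corrupted length */
	const size_t aligned = align_up(huge, 8);

	/* aligned wrapped to 0: smaller than block_size_max, yet the
	 * request is unsatisfiable. 'aligned >= size' catches the wrap. */
	printf("aligned = %zu, wrapped: %s\n",
	       aligned, aligned >= huge ? "no" : "yes");
	return 0;
}

When the guard trips, adjust_request_size() returns 0, which is why tlsf_malloc(), tlsf_memalign() and tlsf_realloc() gain their explicit early NULL returns rather than passing 0 on to block_locate_free().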
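Seen from the API side, the combined effect is that oversized requests now fail cleanly. A hypothetical caller, assuming the tlsf.h header that declares this file's interface and the upstream tlsf_create()/tlsf_add_pool() entry points:

#include <stdint.h>
#include <stdio.h>
#include "tlsf.h"	/* assumed: the header declaring this file's API */

static _Alignas(8) char heap[64 * 1024];

int main(void)
{
	/* The control structure lives at the start of the buffer; the pool
	 * proper begins tlsf_size() bytes in (now rounded up with align_up,
	 * per the tlsf_size() hunk above). */
	tlsf_t t = tlsf_create(heap);
	tlsf_add_pool(t, heap + tlsf_size(), sizeof(heap) - tlsf_size());

	/* adjust_request_size() yields 0 for this, so NULL comes back
	 * instead of a block_size_min-sized block. */
	void *p = tlsf_malloc(t, SIZE_MAX - 2);
	printf("oversized alloc: %p\n", p);

	return 0;
}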