author      Juergen Beisert <jbe@pengutronix.de>    2009-12-10 13:09:02 +0100
committer   Juergen Beisert <jbe@pengutronix.de>    2009-12-10 13:09:02 +0100
commit      8c187839eec1f02b2ffcad2c81399c82c86c5be2 (patch)
tree        6de2f8947ef44723f0c34fae4419425cb9cbfed1 /common
parent      5ed894e9a64ab5acd149184c1ac8edae6057bc8b (diff)
download    barebox-8c187839eec1f02b2ffcad2c81399c82c86c5be2.tar.gz
            barebox-8c187839eec1f02b2ffcad2c81399c82c86c5be2.tar.xz
Re-format the file
Re-format the file with 'indent' and some manual corrections.

Signed-off-by: Juergen Beisert <jbe@pengutronix.de>
Diffstat (limited to 'common')
-rw-r--r--  common/dlmalloc.c | 1693
1 file changed, 781 insertions(+), 912 deletions(-)
diff --git a/common/dlmalloc.c b/common/dlmalloc.c
index 6f7a9eba44..2f616370aa 100644
--- a/common/dlmalloc.c
+++ b/common/dlmalloc.c
@@ -277,7 +277,6 @@
*/
-
#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD (0)
#endif
@@ -310,7 +309,6 @@
*/
-
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif
@@ -350,10 +348,8 @@
All together, these considerations should lead you to use mmap
only for relatively large requests.
-
*/
-
#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX (0)
#endif
@@ -397,7 +393,6 @@
returns a unique pointer for malloc(0), so does realloc(p, 0).
*/
-
/* #define REALLOC_ZERO_BYTES_FREES */
/*
@@ -446,26 +441,26 @@
/* SVID2/XPG mallinfo structure */
-struct mallinfo {
- int arena; /* total space allocated from system */
- int ordblks; /* number of non-inuse chunks */
- int smblks; /* unused -- always zero */
- int hblks; /* number of mmapped regions */
- int hblkhd; /* total space in mmapped regions */
- int usmblks; /* unused -- always zero */
- int fsmblks; /* unused -- always zero */
- int uordblks; /* total allocated space */
- int fordblks; /* total non-inuse space */
- int keepcost; /* top-most, releasable (via malloc_trim) space */
+struct mallinfo
+{
+ int arena; /* total space allocated from system */
+ int ordblks; /* number of non-inuse chunks */
+ int smblks; /* unused -- always zero */
+ int hblks; /* number of mmapped regions */
+ int hblkhd; /* total space in mmapped regions */
+ int usmblks; /* unused -- always zero */
+ int fsmblks; /* unused -- always zero */
+ int uordblks; /* total allocated space */
+ int fordblks; /* total non-inuse space */
+ int keepcost; /* top-most, releasable (via malloc_trim) space */
};
/* SVID2/XPG mallopt options */
-#define M_MXFAST 1 /* UNUSED in this malloc */
-#define M_NLBLKS 2 /* UNUSED in this malloc */
-#define M_GRAIN 3 /* UNUSED in this malloc */
-#define M_KEEP 4 /* UNUSED in this malloc */
-
+#define M_MXFAST 1 /* UNUSED in this malloc */
+#define M_NLBLKS 2 /* UNUSED in this malloc */
+#define M_GRAIN 3 /* UNUSED in this malloc */
+#define M_KEEP 4 /* UNUSED in this malloc */
/* mallopt options that actually do something */
@@ -482,22 +477,21 @@ struct mallinfo {
bsd/gnu getpagesize.h
*/
-#define malloc_getpagesize 4096
+#define malloc_getpagesize 4096
/*
Type declarations
*/
-
struct malloc_chunk
{
- INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
- INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
- struct malloc_chunk* fd; /* double links -- used only if free. */
- struct malloc_chunk* bk;
+ INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
+ INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
+ struct malloc_chunk *fd; /* double links -- used only if free. */
+ struct malloc_chunk *bk;
};
-typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk *mchunkptr;
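The malloc_chunk header above is the core of dlmalloc's boundary-tag scheme: user memory begins right after the size field, and the fd/bk links are only meaningful while the chunk sits on a free list. As a rough illustration (the real chunk2mem()/mem2chunk() macros are defined elsewhere in this file and are not part of this hunk; the offsets below are an assumption based on the struct layout shown), converting between a chunk pointer and the pointer handed to the caller is plain pointer arithmetic:

#include <stddef.h>
#include <stdio.h>

typedef size_t INTERNAL_SIZE_T;

struct malloc_chunk {
	INTERNAL_SIZE_T prev_size;	/* size of previous chunk (if free) */
	INTERNAL_SIZE_T size;		/* size in bytes, including overhead */
	struct malloc_chunk *fd;	/* free-list links, valid only when free */
	struct malloc_chunk *bk;
};

/* Hypothetical stand-ins for chunk2mem()/mem2chunk():
 * user memory starts right after prev_size and size. */
#define CHUNK_HDR	(2 * sizeof(INTERNAL_SIZE_T))
#define chunk2mem(p)	((void *)((char *)(p) + CHUNK_HDR))
#define mem2chunk(m)	((struct malloc_chunk *)((char *)(m) - CHUNK_HDR))

int main(void)
{
	struct malloc_chunk c = { 0, 32, NULL, NULL };
	void *mem = chunk2mem(&c);

	/* The conversion round-trips back to the same header. */
	printf("header %p, user mem %p, back %p\n",
	       (void *)&c, mem, (void *)mem2chunk(mem));
	return 0;
}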
/*
@@ -646,14 +640,10 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
-
-
-
/*
Physical chunk operations
*/
-
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1
@@ -666,7 +656,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
-
/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
@@ -676,14 +665,10 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define prev_chunk(p)\
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
-
/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
-
-
-
/*
Dealing with use bits
*/
@@ -720,9 +705,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define clear_inuse_bit_at_offset(p, s)\
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
-
-
-
/*
Dealing with size fields
*/
@@ -743,10 +725,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
-
-
-
-
/*
Bins
@@ -781,9 +759,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-#define NAV 128 /* number of bins */
+#define NAV 128 /* number of bins */
-typedef struct malloc_chunk* mbinptr;
+typedef struct malloc_chunk *mbinptr;
/* access macros */
@@ -797,9 +775,8 @@ typedef struct malloc_chunk* mbinptr;
indexing, maintain locality, and avoid some initialization tests.
*/
-#define top (bin_at(0)->fd) /* The topmost chunk */
-#define last_remainder (bin_at(1)) /* remainder from last split */
-
+#define top (bin_at(0)->fd) /* The topmost chunk */
+#define last_remainder (bin_at(1)) /* remainder from last split */
/*
Because top initially points to its own bin with initial
@@ -815,23 +792,38 @@ typedef struct malloc_chunk* mbinptr;
#define IAV(i) bin_at(i), bin_at(i)
static mbinptr av_[NAV * 2 + 2] = {
- 0, 0,
- IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7),
- IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15),
- IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23),
- IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31),
- IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39),
- IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47),
- IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55),
- IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63),
- IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71),
- IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79),
- IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87),
- IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95),
- IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103),
- IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
- IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
- IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
+ 0, 0,
+ IAV (0), IAV (1), IAV (2), IAV (3), IAV (4), IAV (5), IAV (6), IAV (7),
+ IAV (8), IAV (9), IAV (10), IAV (11), IAV (12), IAV (13), IAV (14),
+ IAV (15),
+ IAV (16), IAV (17), IAV (18), IAV (19), IAV (20), IAV (21), IAV (22),
+ IAV (23),
+ IAV (24), IAV (25), IAV (26), IAV (27), IAV (28), IAV (29), IAV (30),
+ IAV (31),
+ IAV (32), IAV (33), IAV (34), IAV (35), IAV (36), IAV (37), IAV (38),
+ IAV (39),
+ IAV (40), IAV (41), IAV (42), IAV (43), IAV (44), IAV (45), IAV (46),
+ IAV (47),
+ IAV (48), IAV (49), IAV (50), IAV (51), IAV (52), IAV (53), IAV (54),
+ IAV (55),
+ IAV (56), IAV (57), IAV (58), IAV (59), IAV (60), IAV (61), IAV (62),
+ IAV (63),
+ IAV (64), IAV (65), IAV (66), IAV (67), IAV (68), IAV (69), IAV (70),
+ IAV (71),
+ IAV (72), IAV (73), IAV (74), IAV (75), IAV (76), IAV (77), IAV (78),
+ IAV (79),
+ IAV (80), IAV (81), IAV (82), IAV (83), IAV (84), IAV (85), IAV (86),
+ IAV (87),
+ IAV (88), IAV (89), IAV (90), IAV (91), IAV (92), IAV (93), IAV (94),
+ IAV (95),
+ IAV (96), IAV (97), IAV (98), IAV (99), IAV (100), IAV (101), IAV (102),
+ IAV (103),
+ IAV (104), IAV (105), IAV (106), IAV (107), IAV (108), IAV (109),
+ IAV (110), IAV (111),
+ IAV (112), IAV (113), IAV (114), IAV (115), IAV (116), IAV (117),
+ IAV (118), IAV (119),
+ IAV (120), IAV (121), IAV (122), IAV (123), IAV (124), IAV (125),
+ IAV (126), IAV (127)
};
/* field-extraction macros */
@@ -868,8 +860,6 @@ static mbinptr av_[NAV * 2 + 2] = {
#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
-
-
/*
To help compensate for the large number of bins, a one-level index
structure is used for bin-by-bin searching. `binblocks' is a
@@ -880,7 +870,7 @@ static mbinptr av_[NAV * 2 + 2] = {
when all are noticed to be empty during traversal in malloc.
*/
-#define BINBLOCKWIDTH 4 /* bins per block */
+#define BINBLOCKWIDTH 4 /* bins per block */
#define binblocks (bin_at(0)->size) /* bitvector of nonempty blocks */
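The 128 bins declared above are doubly-linked lists of free chunks grouped by size, and binblocks is a one-word bitmap in which each bit says whether a group of BINBLOCKWIDTH bins might still hold chunks, letting malloc() skip whole groups of empty bins during its scan. A minimal sketch of that idea, independent of the macros in this file (all names below are illustrative only):

#include <stdio.h>

#define NAV		128	/* number of bins */
#define BINBLOCKWIDTH	4	/* bins per block */

static unsigned long binblocks;	/* one bit per block of four bins */

/* Mark the block containing bin 'idx' as possibly non-empty. */
static void mark_bin(int idx)
{
	binblocks |= 1UL << (idx / BINBLOCKWIDTH);
}

/* First bin at or after 'idx' whose block may hold free chunks, or -1. */
static int next_candidate_bin(int idx)
{
	unsigned long block = 1UL << (idx / BINBLOCKWIDTH);

	while (idx < NAV) {
		if (binblocks & block)
			return idx;
		idx = (idx / BINBLOCKWIDTH + 1) * BINBLOCKWIDTH;
		block <<= 1;
	}
	return -1;
}

int main(void)
{
	mark_bin(37);	/* pretend a free chunk was placed in bin 37 */
	printf("%d\n", next_candidate_bin(10));	/* 36: first bin of block 9 */
	return 0;
}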
@@ -909,7 +899,7 @@ ulong mem_malloc_end(void)
return malloc_end;
}
-void mem_malloc_init (void *start, void *end)
+void mem_malloc_init(void *start, void *end)
{
malloc_start = (ulong)start;
malloc_end = (ulong)end;
@@ -921,21 +911,21 @@ static void *sbrk_no_zero(ptrdiff_t increment)
ulong old = malloc_brk;
ulong new = old + increment;
- if ((new < malloc_start) || (new > malloc_end))
- return NULL;
+ if ((new < malloc_start) || (new > malloc_end))
+ return NULL;
malloc_brk = new;
return (void *)old;
}
-static void *sbrk (ptrdiff_t increment)
+static void *sbrk(ptrdiff_t increment)
{
void *old = sbrk_no_zero(increment);
/* Only clear increment, if valid address was returned */
if (old != NULL)
- memset (old, 0, increment);
+ memset(old, 0, increment);
return old;
}
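In this barebox port there is no kernel brk: mem_malloc_init() pins the pool between malloc_start and malloc_end, and sbrk() hands out (and zeroes) the next increment bytes until the pool runs out. A self-contained model of that behaviour is sketched below; it merges sbrk_no_zero() and sbrk() and substitutes a static buffer for the board-specific RAM region, so it is an approximation rather than the code above:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef unsigned long ulong;

static ulong malloc_start, malloc_end, malloc_brk;

static void mem_malloc_init(void *start, void *end)
{
	malloc_start = (ulong)start;
	malloc_end = (ulong)end;
	malloc_brk = malloc_start;
}

static void *sbrk(ptrdiff_t increment)
{
	ulong old = malloc_brk;
	ulong new = old + increment;

	if (new < malloc_start || new > malloc_end)
		return NULL;	/* pool exhausted (or shrunk below its start) */
	malloc_brk = new;
	if (increment > 0)
		memset((void *)old, 0, increment);	/* clear freshly grown space */
	return (void *)old;
}

int main(void)
{
	static char pool[4096];	/* stand-in for the board's malloc area */

	mem_malloc_init(pool, pool + sizeof(pool));
	printf("first  sbrk: %p\n", sbrk(1024));
	printf("second sbrk: %p\n", sbrk(1024));
	printf("too large:   %p\n", sbrk(8192));	/* NULL: past malloc_end */
	return 0;
}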
@@ -946,14 +936,14 @@ static void *sbrk (ptrdiff_t increment)
/* variables holding tunable values */
#ifndef __U_BOOT__
-static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
-static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX;
-static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
+static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX;
+static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD;
#endif
-static unsigned long top_pad = DEFAULT_TOP_PAD;
+static unsigned long top_pad = DEFAULT_TOP_PAD;
/* The first value returned from sbrk */
-static char* sbrk_base = (char*)(-1);
+static char *sbrk_base = (char*)(-1);
/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem;
@@ -971,10 +961,6 @@ static struct mallinfo current_mallinfo;
static unsigned long mmapped_mem;
-
-
-
-
/*
Macro-based internal utilities
*/
@@ -990,7 +976,6 @@ static unsigned long mmapped_mem;
putting it ahead of others of same size.
*/
-
#define frontlink(P, S, IDX, BK, FD) \
{ \
if (S < MAX_SMALLBIN_SIZE) \
@@ -1020,7 +1005,6 @@ static unsigned long mmapped_mem;
} \
}
-
/* take a chunk off a list */
#define unlink(P, BK, FD) \
@@ -1044,129 +1028,112 @@ static unsigned long mmapped_mem;
#define clear_last_remainder \
(last_remainder->fd = last_remainder->bk = last_remainder)
-
-
-
-
/* Routines dealing with mmap(). */
-
-
-
-
/*
Extend the top-most chunk by obtaining memory from system.
Main interface to sbrk (but see also malloc_trim).
*/
-
static void malloc_extend_top(INTERNAL_SIZE_T nb)
{
- char* brk; /* return value from sbrk */
- INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
- INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */
- char* new_brk; /* return of 2nd sbrk call */
- INTERNAL_SIZE_T top_size; /* new size of top chunk */
-
- mchunkptr old_top = top; /* Record state of old top */
- INTERNAL_SIZE_T old_top_size = chunksize(old_top);
- char* old_end = (char*)(chunk_at_offset(old_top, old_top_size));
-
- /* Pad request with top_pad plus minimal overhead */
-
- INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
- unsigned long pagesz = malloc_getpagesize;
-
- /* If not the first time through, round to preserve page boundary */
- /* Otherwise, we need to correct to a page size below anyway. */
- /* (We also correct below if an intervening foreign sbrk call.) */
-
- if (sbrk_base != (char*)(-1))
- sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
-
- brk = (char*)(sbrk (sbrk_size));
-
- /* Fail if sbrk failed or if a foreign sbrk call killed our space */
- if (brk == (char*)(NULL) ||
- (brk < old_end && old_top != initial_top))
- return;
-
- sbrked_mem += sbrk_size;
-
- if (brk == old_end) /* can just add bytes to current top */
- {
- top_size = sbrk_size + old_top_size;
- set_head(top, top_size | PREV_INUSE);
- }
- else
- {
- if (sbrk_base == (char*)(-1)) /* First time through. Record base */
- sbrk_base = brk;
- else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */
- sbrked_mem += brk - (char*)old_end;
-
- /* Guarantee alignment of first new chunk made from this space */
- front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- {
- correction = (MALLOC_ALIGNMENT) - front_misalign;
- brk += correction;
- }
- else
- correction = 0;
-
- /* Guarantee the next brk will be at a page boundary */
-
- correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
- ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
-
- /* Allocate correction */
- new_brk = (char*)(sbrk (correction));
- if (new_brk == (char*)(NULL)) return;
-
- sbrked_mem += correction;
-
- top = (mchunkptr)brk;
- top_size = new_brk - brk + correction;
- set_head(top, top_size | PREV_INUSE);
-
- if (old_top != initial_top)
- {
-
- /* There must have been an intervening foreign sbrk call. */
- /* A double fencepost is necessary to prevent consolidation */
-
- /* If not enough space to do this, then user did something very wrong */
- if (old_top_size < MINSIZE)
- {
- set_head(top, PREV_INUSE); /* will force null return from malloc */
- return;
- }
-
- /* Also keep size a multiple of MALLOC_ALIGNMENT */
- old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
- set_head_size(old_top, old_top_size);
- chunk_at_offset(old_top, old_top_size )->size =
- SIZE_SZ|PREV_INUSE;
- chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
- SIZE_SZ|PREV_INUSE;
- /* If possible, release the rest. */
- if (old_top_size >= MINSIZE)
- free(chunk2mem(old_top));
- }
- }
-
- if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
- max_sbrked_mem = sbrked_mem;
- if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
- max_total_mem = mmapped_mem + sbrked_mem;
-}
-
+ char *brk; /* return value from sbrk */
+ INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
+ INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */
+ char *new_brk; /* return of 2nd sbrk call */
+ INTERNAL_SIZE_T top_size; /* new size of top chunk */
+
+ mchunkptr old_top = top; /* Record state of old top */
+ INTERNAL_SIZE_T old_top_size = chunksize(old_top);
+ char *old_end = (char *) (chunk_at_offset(old_top, old_top_size));
+
+ /* Pad request with top_pad plus minimal overhead */
+
+ INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
+ unsigned long pagesz = malloc_getpagesize;
+
+ /* If not the first time through, round to preserve page boundary */
+ /* Otherwise, we need to correct to a page size below anyway. */
+ /* (We also correct below if an intervening foreign sbrk call.) */
+
+ if (sbrk_base != (char*)(-1))
+ sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
+
+ brk = (char*)(sbrk(sbrk_size));
+
+ /* Fail if sbrk failed or if a foreign sbrk call killed our space */
+ if (brk == (char*)(NULL) || (brk < old_end && old_top != initial_top))
+ return;
+
+ sbrked_mem += sbrk_size;
+
+ if (brk == old_end) { /* can just add bytes to current top */
+ top_size = sbrk_size + old_top_size;
+ set_head (top, top_size | PREV_INUSE);
+ } else {
+ if (sbrk_base == (char*)(-1)) /* First time through. Record base */
+ sbrk_base = brk;
+ else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */
+ sbrked_mem += brk - (char*)old_end;
+
+ /* Guarantee alignment of first new chunk made from this space */
+ front_misalign =
+ (unsigned long) chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0) {
+ correction = (MALLOC_ALIGNMENT) - front_misalign;
+ brk += correction;
+ } else
+ correction = 0;
+
+ /* Guarantee the next brk will be at a page boundary */
+
+ correction += ((((unsigned long) (brk + sbrk_size)) +
+ (pagesz - 1)) & ~(pagesz - 1)) -
+ ((unsigned long) (brk + sbrk_size));
+
+ /* Allocate correction */
+ new_brk = (char*) (sbrk(correction));
+ if (new_brk == (char*)(NULL))
+ return;
+
+ sbrked_mem += correction;
+
+ top = (mchunkptr) brk;
+ top_size = new_brk - brk + correction;
+ set_head (top, top_size | PREV_INUSE);
+
+ if (old_top != initial_top) {
+
+ /* There must have been an intervening foreign sbrk call. */
+ /* A double fencepost is necessary to prevent consolidation */
+
+ /* If not enough space to do this, then user did something very wrong */
+ if (old_top_size < MINSIZE) {
+ set_head (top, PREV_INUSE); /* will force null return from malloc */
+ return;
+ }
+
+ /* Also keep size a multiple of MALLOC_ALIGNMENT */
+ old_top_size = (old_top_size -
+ 3 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
+ set_head_size (old_top, old_top_size);
+ chunk_at_offset (old_top, old_top_size)->size =
+ SIZE_SZ | PREV_INUSE;
+ chunk_at_offset (old_top, old_top_size + SIZE_SZ)->size =
+ SIZE_SZ | PREV_INUSE;
+ /* If possible, release the rest. */
+ if (old_top_size >= MINSIZE)
+ free(chunk2mem (old_top));
+ }
+ }
-
+ if ((unsigned long) sbrked_mem > (unsigned long) max_sbrked_mem)
+ max_sbrked_mem = sbrked_mem;
+ if ((unsigned long) (mmapped_mem + sbrked_mem) > (unsigned long) max_total_mem)
+ max_total_mem = mmapped_mem + sbrked_mem;
+}
/* Main public routines */
-
/*
Malloc Algorthim:
@@ -1223,233 +1190,191 @@ static void malloc_extend_top(INTERNAL_SIZE_T nb)
always true of any allocated chunk; i.e., that each allocated
chunk borders either a previously allocated and still in-use chunk,
or the base of its memory arena.)
-
*/
-
-void* malloc(size_t bytes)
+void *malloc(size_t bytes)
{
- mchunkptr victim; /* inspected/selected chunk */
- INTERNAL_SIZE_T victim_size; /* its size */
- int idx; /* index for bin traversal */
- mbinptr bin; /* associated bin */
- mchunkptr remainder; /* remainder from a split */
- long remainder_size; /* its size */
- int remainder_index; /* its bin index */
- unsigned long block; /* block traverser bit */
- int startidx; /* first bin of a traversed block */
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
- mbinptr q; /* misc temp */
-
- INTERNAL_SIZE_T nb;
-
- if ((long)bytes < 0) return 0;
-
- nb = request2size(bytes); /* padded request size; */
-
- /* Check for exact match in a bin */
-
- if (is_small_request(nb)) /* Faster version for small requests */
- {
- idx = smallbin_index(nb);
-
- /* No traversal or size check necessary for small bins. */
-
- q = bin_at(idx);
- victim = last(q);
-
- /* Also scan the next one, since it would have a remainder < MINSIZE */
- if (victim == q)
- {
- q = next_bin(q);
- victim = last(q);
- }
- if (victim != q)
- {
- victim_size = chunksize(victim);
- unlink(victim, bck, fwd);
- set_inuse_bit_at_offset(victim, victim_size);
- return chunk2mem(victim);
- }
-
- idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
-
- }
- else
- {
- idx = bin_index(nb);
- bin = bin_at(idx);
-
- for (victim = last(bin); victim != bin; victim = victim->bk)
- {
- victim_size = chunksize(victim);
- remainder_size = victim_size - nb;
-
- if (remainder_size >= (long)MINSIZE) /* too big */
- {
- --idx; /* adjust to rescan below after checking last remainder */
- break;
- }
-
- else if (remainder_size >= 0) /* exact fit */
- {
- unlink(victim, bck, fwd);
- set_inuse_bit_at_offset(victim, victim_size);
- return chunk2mem(victim);
- }
- }
-
- ++idx;
-
- }
-
- /* Try to use the last split-off remainder */
-
- if ( (victim = last_remainder->fd) != last_remainder)
- {
- victim_size = chunksize(victim);
- remainder_size = victim_size - nb;
-
- if (remainder_size >= (long)MINSIZE) /* re-split */
- {
- remainder = chunk_at_offset(victim, nb);
- set_head(victim, nb | PREV_INUSE);
- link_last_remainder(remainder);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- return chunk2mem(victim);
- }
-
- clear_last_remainder;
-
- if (remainder_size >= 0) /* exhaust */
- {
- set_inuse_bit_at_offset(victim, victim_size);
- return chunk2mem(victim);
- }
-
- /* Else place in bin */
-
- frontlink(victim, victim_size, remainder_index, bck, fwd);
- }
-
- /*
- If there are any possibly nonempty big-enough blocks,
- search for best fitting chunk by scanning bins in blockwidth units.
- */
-
- if ( (block = idx2binblock(idx)) <= binblocks)
- {
-
- /* Get to the first marked block */
-
- if ( (block & binblocks) == 0)
- {
- /* force to an even block boundary */
- idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
- block <<= 1;
- while ((block & binblocks) == 0)
- {
- idx += BINBLOCKWIDTH;
- block <<= 1;
- }
- }
-
- /* For each possibly nonempty block ... */
- for (;;)
- {
- startidx = idx; /* (track incomplete blocks) */
- q = bin = bin_at(idx);
-
- /* For each bin in this block ... */
- do
- {
- /* Find and use first big enough chunk ... */
-
- for (victim = last(bin); victim != bin; victim = victim->bk)
- {
- victim_size = chunksize(victim);
- remainder_size = victim_size - nb;
-
- if (remainder_size >= (long)MINSIZE) /* split */
- {
- remainder = chunk_at_offset(victim, nb);
- set_head(victim, nb | PREV_INUSE);
- unlink(victim, bck, fwd);
- link_last_remainder(remainder);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_foot(remainder, remainder_size);
- return chunk2mem(victim);
- }
-
- else if (remainder_size >= 0) /* take */
- {
- set_inuse_bit_at_offset(victim, victim_size);
- unlink(victim, bck, fwd);
- return chunk2mem(victim);
- }
-
+ mchunkptr victim; /* inspected/selected chunk */
+ INTERNAL_SIZE_T victim_size; /* its size */
+ int idx; /* index for bin traversal */
+ mbinptr bin; /* associated bin */
+ mchunkptr remainder; /* remainder from a split */
+ long remainder_size; /* its size */
+ int remainder_index; /* its bin index */
+ unsigned long block; /* block traverser bit */
+ int startidx; /* first bin of a traversed block */
+ mchunkptr fwd; /* misc temp for linking */
+ mchunkptr bck; /* misc temp for linking */
+ mbinptr q; /* misc temp */
+
+ INTERNAL_SIZE_T nb;
+
+ if ((long) bytes < 0)
+ return 0;
+
+ nb = request2size(bytes); /* padded request size; */
+
+ /* Check for exact match in a bin */
+
+ if (is_small_request(nb)) { /* Faster version for small requests */
+ idx = smallbin_index(nb);
+
+ /* No traversal or size check necessary for small bins. */
+
+ q = bin_at(idx);
+ victim = last(q);
+
+ /* Also scan the next one, since it would have a remainder < MINSIZE */
+ if (victim == q) {
+ q = next_bin(q);
+ victim = last(q);
+ }
+ if (victim != q) {
+ victim_size = chunksize(victim);
+ unlink(victim, bck, fwd);
+ set_inuse_bit_at_offset(victim, victim_size);
+ return chunk2mem(victim);
+ }
+ idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
+ } else {
+ idx = bin_index(nb);
+ bin = bin_at(idx);
+
+ for (victim = last(bin); victim != bin; victim = victim->bk) {
+ victim_size = chunksize(victim);
+ remainder_size = victim_size - nb;
+
+ if (remainder_size >= (long)MINSIZE) { /* too big */
+ --idx; /* adjust to rescan below after checking last remainder */
+ break;
+ }
+
+ else if (remainder_size >= 0) { /* exact fit */
+ unlink(victim, bck, fwd);
+ set_inuse_bit_at_offset(victim, victim_size);
+ return chunk2mem(victim);
+ }
+ }
+ ++idx;
}
- bin = next_bin(bin);
-
- } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
-
- /* Clear out the block bit. */
-
- do /* Possibly backtrack to try to clear a partial block */
- {
- if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
- {
- binblocks &= ~block;
- break;
- }
- --startidx;
- q = prev_bin(q);
- } while (first(q) == q);
-
- /* Get to the next possibly nonempty block */
-
- if ( (block <<= 1) <= binblocks && (block != 0) )
- {
- while ((block & binblocks) == 0)
- {
- idx += BINBLOCKWIDTH;
- block <<= 1;
+ /* Try to use the last split-off remainder */
+
+ if ((victim = last_remainder->fd) != last_remainder) {
+ victim_size = chunksize(victim);
+ remainder_size = victim_size - nb;
+
+ if (remainder_size >= (long)MINSIZE) { /* re-split */
+ remainder = chunk_at_offset(victim, nb);
+ set_head(victim, nb | PREV_INUSE);
+ link_last_remainder(remainder);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+ return chunk2mem(victim);
+ }
+
+ clear_last_remainder;
+
+ if (remainder_size >= 0) { /* exhaust */
+ set_inuse_bit_at_offset(victim, victim_size);
+ return chunk2mem(victim);
+ }
+ /* Else place in bin */
+ frontlink(victim, victim_size, remainder_index, bck, fwd);
}
- }
- else
- break;
- }
- }
-
-
- /* Try to use top chunk */
-
- /* Require that there be a remainder, ensuring top always exists */
- if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
- {
+ /*
+ If there are any possibly nonempty big-enough blocks,
+ search for best fitting chunk by scanning bins in blockwidth units.
+ */
+ if ((block = idx2binblock (idx)) <= binblocks) {
+ /* Get to the first marked block */
+ if ((block & binblocks) == 0) {
+ /* force to an even block boundary */
+ idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
+ block <<= 1;
+ while ((block & binblocks) == 0) {
+ idx += BINBLOCKWIDTH;
+ block <<= 1;
+ }
+ }
+
+ /* For each possibly nonempty block ... */
+ for (;;) {
+ startidx = idx; /* (track incomplete blocks) */
+ q = bin = bin_at(idx);
+
+ /* For each bin in this block ... */
+ do {
+ /* Find and use first big enough chunk ... */
+ for (victim = last(bin); victim != bin;
+ victim = victim->bk) {
+ victim_size = chunksize(victim);
+ remainder_size = victim_size - nb;
+
+ if (remainder_size >= (long)MINSIZE) { /* split */
+ remainder =
+ chunk_at_offset (victim,
+ nb);
+ set_head(victim,
+ nb | PREV_INUSE);
+ unlink(victim, bck, fwd);
+ link_last_remainder(remainder);
+ set_head(remainder,
+ remainder_size |
+ PREV_INUSE);
+ set_foot(remainder,
+ remainder_size);
+ return chunk2mem(victim);
+ } else if (remainder_size >= 0) { /* take */
+ set_inuse_bit_at_offset(victim,
+ victim_size);
+ unlink(victim, bck, fwd);
+ return chunk2mem(victim);
+ }
+ }
+ bin = next_bin (bin);
+ } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
+
+ /* Clear out the block bit. */
+ do { /* Possibly backtrack to try to clear a partial block */
+ if ((startidx & (BINBLOCKWIDTH - 1)) == 0) {
+ binblocks &= ~block;
+ break;
+ }
+ --startidx;
+ q = prev_bin(q);
+ } while (first(q) == q);
+
+ /* Get to the next possibly nonempty block */
+
+ if ((block <<= 1) <= binblocks && (block != 0)) {
+ while ((block & binblocks) == 0) {
+ idx += BINBLOCKWIDTH;
+ block <<= 1;
+ }
+ } else
+ break;
+ }
+ }
- /* Try to extend */
- malloc_extend_top(nb);
- if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
- return 0; /* propagate failure */
- }
+ /* Try to use top chunk */
- victim = top;
- set_head(victim, nb | PREV_INUSE);
- top = chunk_at_offset(victim, nb);
- set_head(top, remainder_size | PREV_INUSE);
- return chunk2mem(victim);
+ /* Require that there be a remainder, ensuring top always exists */
+ if ((remainder_size = chunksize (top) - nb) < (long) MINSIZE) {
+ /* Try to extend */
+ malloc_extend_top(nb);
+ if ((remainder_size = chunksize(top) - nb) < (long) MINSIZE)
+ return 0; /* propagate failure */
+ }
+ victim = top;
+ set_head(victim, nb | PREV_INUSE);
+ top = chunk_at_offset(victim, nb);
+ set_head(top, remainder_size | PREV_INUSE);
+ return chunk2mem(victim);
}
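The very first step of malloc() above is nb = request2size(bytes), which converts the caller's byte count into an internal chunk size. That macro is defined earlier in the file and is not part of this diff, but the usual dlmalloc scheme is: add room for the size word, round up to the malloc alignment, and never go below MINSIZE. A hedged sketch of that computation (the constants assume a 32-bit INTERNAL_SIZE_T and 8-byte alignment):

#include <stdio.h>

#define SIZE_SZ		   4UL	/* sizeof(INTERNAL_SIZE_T), assumed 32-bit */
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE		   16UL	/* smallest chunk: header plus two list pointers */

/* Pad a request with overhead and round to the alignment, mirroring
 * what dlmalloc's request2size() typically does. */
static unsigned long request2size(unsigned long bytes)
{
	unsigned long nb = (bytes + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;

	return nb < MINSIZE ? MINSIZE : nb;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       request2size(1),		/* 16: clamped to MINSIZE        */
	       request2size(13),	/* 24: 13 + 4 rounded up to 24   */
	       request2size(24));	/* 32: 24 + 4 rounded up to 32   */
	return 0;
}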
-
-
-
/*
-
free() algorithm :
cases:
@@ -1466,97 +1391,83 @@ void* malloc(size_t bytes)
4. Other chunks are consolidated as they arrive, and
placed in corresponding bins. (This includes the case of
consolidating with the current `last_remainder').
-
*/
-
-
-void free(void* mem)
+void free(void *mem)
{
- mchunkptr p; /* chunk corresponding to mem */
- INTERNAL_SIZE_T hd; /* its head field */
- INTERNAL_SIZE_T sz; /* its size */
- int idx; /* its bin index */
- mchunkptr next; /* next contiguous chunk */
- INTERNAL_SIZE_T nextsz; /* its size */
- INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
- int islr; /* track whether merging with last_remainder */
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk(mem);
- hd = p->size;
-
-
- sz = hd & ~PREV_INUSE;
- next = chunk_at_offset(p, sz);
- nextsz = chunksize(next);
-
- if (next == top) /* merge with top */
- {
- sz += nextsz;
-
- if (!(hd & PREV_INUSE)) /* consolidate backward */
- {
- prevsz = p->prev_size;
- p = chunk_at_offset(p, -((long) prevsz));
- sz += prevsz;
- unlink(p, bck, fwd);
- }
-
- set_head(p, sz | PREV_INUSE);
- top = p;
+ mchunkptr p; /* chunk corresponding to mem */
+ INTERNAL_SIZE_T hd; /* its head field */
+ INTERNAL_SIZE_T sz; /* its size */
+ int idx; /* its bin index */
+ mchunkptr next; /* next contiguous chunk */
+ INTERNAL_SIZE_T nextsz; /* its size */
+ INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+ int islr; /* track whether merging with last_remainder */
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem);
+ hd = p->size;
+
+
+ sz = hd & ~PREV_INUSE;
+ next = chunk_at_offset(p, sz);
+ nextsz = chunksize(next);
+
+ if (next == top) { /* merge with top */
+ sz += nextsz;
+
+ if (!(hd & PREV_INUSE)) { /* consolidate backward */
+ prevsz = p->prev_size;
+ p = chunk_at_offset(p, -((long) prevsz));
+ sz += prevsz;
+ unlink (p, bck, fwd);
+ }
+
+ set_head(p, sz | PREV_INUSE);
+ top = p;
#ifdef USE_MALLOC_TRIM
- if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
- malloc_trim(top_pad);
+ if ((unsigned long) (sz) >= (unsigned long)trim_threshold)
+ malloc_trim(top_pad);
#endif
- return;
- }
-
- set_head(next, nextsz); /* clear inuse bit */
-
- islr = 0;
-
- if (!(hd & PREV_INUSE)) /* consolidate backward */
- {
- prevsz = p->prev_size;
- p = chunk_at_offset(p, -((long) prevsz));
- sz += prevsz;
-
- if (p->fd == last_remainder) /* keep as last_remainder */
- islr = 1;
- else
- unlink(p, bck, fwd);
- }
-
- if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
- {
- sz += nextsz;
-
- if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
- {
- islr = 1;
- link_last_remainder(p);
- }
- else
- unlink(next, bck, fwd);
- }
-
-
- set_head(p, sz | PREV_INUSE);
- set_foot(p, sz);
- if (!islr)
- frontlink(p, sz, idx, bck, fwd);
-}
+ return;
+ }
+ set_head(next, nextsz); /* clear inuse bit */
-
+ islr = 0;
+ if (!(hd & PREV_INUSE)) { /* consolidate backward */
+ prevsz = p->prev_size;
+ p = chunk_at_offset(p, -((long) prevsz));
+ sz += prevsz;
+
+ if (p->fd == last_remainder) /* keep as last_remainder */
+ islr = 1;
+ else
+ unlink(p, bck, fwd);
+ }
+
+ if (!(inuse_bit_at_offset(next, nextsz))) { /* consolidate forward */
+ sz += nextsz;
+
+ if (!islr && next->fd == last_remainder) { /* re-insert last_remainder */
+ islr = 1;
+ link_last_remainder(p);
+ } else
+ unlink(next, bck, fwd);
+ }
-/*
+ set_head(p, sz | PREV_INUSE);
+ set_foot(p, sz);
+ if (!islr)
+ frontlink(p, sz, idx, bck, fwd);
+}
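The backward-consolidation step in free() above relies entirely on the boundary tags: when the PREV_INUSE bit of a chunk is clear, its prev_size field holds the size of the free neighbour in front of it, so stepping back by that many bytes lands exactly on the neighbour's header and the two chunks can be merged. A toy demonstration with two hand-built adjacent chunks (layout only, no free lists; field meanings taken from the struct earlier in this diff):

#include <stddef.h>
#include <stdio.h>

typedef size_t INTERNAL_SIZE_T;

struct malloc_chunk {
	INTERNAL_SIZE_T prev_size;	/* size of previous chunk, valid only if it is free */
	INTERNAL_SIZE_T size;		/* this chunk's size; low bit is PREV_INUSE */
};

#define PREV_INUSE 0x1

int main(void)
{
	unsigned char heap[128] = { 0 };
	struct malloc_chunk *a = (struct malloc_chunk *)heap;
	struct malloc_chunk *b = (struct malloc_chunk *)(heap + 48);

	a->size = 48 | PREV_INUSE;	/* a spans 48 bytes; its own predecessor is in use */
	b->prev_size = 48;		/* a is free, so b carries a's size ...            */
	b->size = 32;			/* ... and b's PREV_INUSE bit stays clear          */

	if (!(b->size & PREV_INUSE)) {	/* the test free() makes before merging backward */
		INTERNAL_SIZE_T prevsz = b->prev_size;
		struct malloc_chunk *p =
			(struct malloc_chunk *)((char *)b - prevsz);

		printf("merge lands on a? %d, combined size %zu\n",
		       p == a, prevsz + (b->size & ~(INTERNAL_SIZE_T)PREV_INUSE));
	}
	return 0;
}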
+
+/*
Realloc algorithm:
Chunks that were obtained via mmap cannot be extended or shrunk
@@ -1586,189 +1497,179 @@ void free(void* mem)
I don't know of any programs still relying on this feature,
and allowing it would also allow too many other incorrect
usages of realloc to be sensible.
-
-
*/
-
-
-void* realloc(void* oldmem, size_t bytes)
+void *realloc(void *oldmem, size_t bytes)
{
- INTERNAL_SIZE_T nb; /* padded request size */
+ INTERNAL_SIZE_T nb; /* padded request size */
- mchunkptr oldp; /* chunk corresponding to oldmem */
- INTERNAL_SIZE_T oldsize; /* its size */
+ mchunkptr oldp; /* chunk corresponding to oldmem */
+ INTERNAL_SIZE_T oldsize; /* its size */
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- void* newmem; /* corresponding user mem */
+ mchunkptr newp; /* chunk to return */
+ INTERNAL_SIZE_T newsize; /* its size */
+ void *newmem; /* corresponding user mem */
- mchunkptr next; /* next contiguous chunk after oldp */
- INTERNAL_SIZE_T nextsize; /* its size */
+ mchunkptr next; /* next contiguous chunk after oldp */
+ INTERNAL_SIZE_T nextsize; /* its size */
- mchunkptr prev; /* previous contiguous chunk before oldp */
- INTERNAL_SIZE_T prevsize; /* its size */
+ mchunkptr prev; /* previous contiguous chunk before oldp */
+ INTERNAL_SIZE_T prevsize; /* its size */
- mchunkptr remainder; /* holds split off extra space from newp */
- INTERNAL_SIZE_T remainder_size; /* its size */
+ mchunkptr remainder; /* holds split off extra space from newp */
+ INTERNAL_SIZE_T remainder_size; /* its size */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
#ifdef REALLOC_ZERO_BYTES_FREES
- if (bytes == 0) { free(oldmem); return 0; }
+ if (bytes == 0) {
+ free(oldmem);
+ return 0;
+ }
#endif
- if ((long)bytes < 0) return 0;
-
- /* realloc of null is supposed to be same as malloc */
- if (oldmem == 0) return malloc(bytes);
-
- newp = oldp = mem2chunk(oldmem);
- newsize = oldsize = chunksize(oldp);
-
-
- nb = request2size(bytes);
-
-
- if ((long)(oldsize) < (long)(nb))
- {
+ if ((long)bytes < 0)
+ return 0;
+
+ /* realloc of null is supposed to be same as malloc */
+ if (oldmem == 0)
+ return malloc(bytes);
+
+ newp = oldp = mem2chunk(oldmem);
+ newsize = oldsize = chunksize(oldp);
+
+
+ nb = request2size(bytes);
+
+
+ if ((long)(oldsize) < (long)(nb)) {
+
+ /* Try expanding forward */
+
+ next = chunk_at_offset(oldp, oldsize);
+ if (next == top || !inuse(next)) {
+ nextsize = chunksize(next);
+
+ /* Forward into top only if a remainder */
+ if (next == top) {
+ if ((long)(nextsize + newsize) >=
+ (long)(nb + MINSIZE)) {
+ newsize += nextsize;
+ top = chunk_at_offset(oldp, nb);
+ set_head (top,
+ (newsize - nb) | PREV_INUSE);
+ set_head_size(oldp, nb);
+ return chunk2mem(oldp);
+ }
+ }
+
+ /* Forward into next chunk */
+ else if (((long) (nextsize + newsize) >= (long) (nb))) {
+ unlink(next, bck, fwd);
+ newsize += nextsize;
+ goto split;
+ }
+ } else {
+ next = 0;
+ nextsize = 0;
+ }
+
+ /* Try shifting backwards. */
+
+ if (!prev_inuse(oldp)) {
+ prev = prev_chunk(oldp);
+ prevsize = chunksize(prev);
+
+ /* try forward + backward first to save a later consolidation */
+
+ if (next != 0) {
+ /* into top */
+ if (next == top) {
+ if ((long)
+ (nextsize + prevsize + newsize) >=
+ (long)(nb + MINSIZE)) {
+ unlink (prev, bck, fwd);
+ newp = prev;
+ newsize += prevsize + nextsize;
+ newmem = chunk2mem(newp);
+ memcpy(newmem, oldmem,
+ oldsize - SIZE_SZ);
+ top = chunk_at_offset(newp, nb);
+ set_head(top,
+ (newsize -
+ nb) | PREV_INUSE);
+ set_head_size(newp, nb);
+ return newmem;
+ }
+ }
+
+ /* into next chunk */
+ else if (((long)(nextsize + prevsize + newsize)
+ >= (long)(nb))) {
+ unlink(next, bck, fwd);
+ unlink(prev, bck, fwd);
+ newp = prev;
+ newsize += nextsize + prevsize;
+ newmem = chunk2mem(newp);
+ memcpy(newmem, oldmem,
+ oldsize - SIZE_SZ);
+ goto split;
+ }
+ }
+
+ /* backward only */
+ if (prev != 0
+ && (long)(prevsize + newsize) >= (long)nb) {
+ unlink(prev, bck, fwd);
+ newp = prev;
+ newsize += prevsize;
+ newmem = chunk2mem(newp);
+ memcpy(newmem, oldmem, oldsize - SIZE_SZ);
+ goto split;
+ }
+ }
+
+ /* Must allocate */
+
+ newmem = malloc(bytes);
+
+ if (newmem == 0) /* propagate failure */
+ return 0;
+
+ /* Avoid copy if newp is next chunk after oldp. */
+ /* (This can only happen when new chunk is sbrk'ed.) */
+
+ if ((newp = mem2chunk(newmem)) == next_chunk(oldp)) {
+ newsize += chunksize(newp);
+ newp = oldp;
+ goto split;
+ }
+
+ /* Otherwise copy, free, and exit */
+ memcpy(newmem, oldmem, oldsize - SIZE_SZ);
+ free(oldmem);
+ return newmem;
+ }
- /* Try expanding forward */
- next = chunk_at_offset(oldp, oldsize);
- if (next == top || !inuse(next))
- {
- nextsize = chunksize(next);
+split: /* split off extra room in old or expanded chunk */
- /* Forward into top only if a remainder */
- if (next == top)
- {
- if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
- {
- newsize += nextsize;
- top = chunk_at_offset(oldp, nb);
- set_head(top, (newsize - nb) | PREV_INUSE);
- set_head_size(oldp, nb);
- return chunk2mem(oldp);
- }
- }
-
- /* Forward into next chunk */
- else if (((long)(nextsize + newsize) >= (long)(nb)))
- {
- unlink(next, bck, fwd);
- newsize += nextsize;
- goto split;
- }
- }
- else
- {
- next = 0;
- nextsize = 0;
- }
-
- /* Try shifting backwards. */
-
- if (!prev_inuse(oldp))
- {
- prev = prev_chunk(oldp);
- prevsize = chunksize(prev);
-
- /* try forward + backward first to save a later consolidation */
-
- if (next != 0)
- {
- /* into top */
- if (next == top)
- {
- if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
- {
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += prevsize + nextsize;
- newmem = chunk2mem(newp);
- memcpy(newmem, oldmem, oldsize - SIZE_SZ);
- top = chunk_at_offset(newp, nb);
- set_head(top, (newsize - nb) | PREV_INUSE);
- set_head_size(newp, nb);
- return newmem;
- }
+ if (newsize - nb >= MINSIZE) { /* split off remainder */
+ remainder = chunk_at_offset(newp, nb);
+ remainder_size = newsize - nb;
+ set_head_size(newp, nb);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_inuse_bit_at_offset(remainder, remainder_size);
+ free (chunk2mem(remainder)); /* let free() deal with it */
+ } else {
+ set_head_size(newp, newsize);
+ set_inuse_bit_at_offset(newp, newsize);
}
- /* into next chunk */
- else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
- {
- unlink(next, bck, fwd);
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += nextsize + prevsize;
- newmem = chunk2mem(newp);
- memcpy(newmem, oldmem, oldsize - SIZE_SZ);
- goto split;
- }
- }
-
- /* backward only */
- if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
- {
- unlink(prev, bck, fwd);
- newp = prev;
- newsize += prevsize;
- newmem = chunk2mem(newp);
- memcpy(newmem, oldmem, oldsize - SIZE_SZ);
- goto split;
- }
- }
-
- /* Must allocate */
-
- newmem = malloc (bytes);
-
- if (newmem == 0) /* propagate failure */
- return 0;
-
- /* Avoid copy if newp is next chunk after oldp. */
- /* (This can only happen when new chunk is sbrk'ed.) */
-
- if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
- {
- newsize += chunksize(newp);
- newp = oldp;
- goto split;
- }
-
- /* Otherwise copy, free, and exit */
- memcpy(newmem, oldmem, oldsize - SIZE_SZ);
- free(oldmem);
- return newmem;
- }
-
-
- split: /* split off extra room in old or expanded chunk */
-
- if (newsize - nb >= MINSIZE) /* split off remainder */
- {
- remainder = chunk_at_offset(newp, nb);
- remainder_size = newsize - nb;
- set_head_size(newp, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_inuse_bit_at_offset(remainder, remainder_size);
- free(chunk2mem(remainder)); /* let free() deal with it */
- }
- else
- {
- set_head_size(newp, newsize);
- set_inuse_bit_at_offset(newp, newsize);
- }
-
- return chunk2mem(newp);
+ return chunk2mem(newp);
}
-
-
-
/*
-
memalign algorithm:
memalign requests more than enough space from malloc, finds a spot
@@ -1782,177 +1683,158 @@ void* realloc(void* oldmem, size_t bytes)
bother calling memalign with an argument of 8 or less.
Overreliance on memalign is a sure way to fragment space.
-
*/
-
-
-void* memalign(size_t alignment, size_t bytes)
+void *memalign(size_t alignment, size_t bytes)
{
- INTERNAL_SIZE_T nb; /* padded request size */
- char* m; /* memory returned by malloc call */
- mchunkptr p; /* corresponding chunk */
- char* brk; /* alignment point within p */
- mchunkptr newp; /* chunk to return */
- INTERNAL_SIZE_T newsize; /* its size */
- INTERNAL_SIZE_T leadsize; /* leading space befor alignment point */
- mchunkptr remainder; /* spare room at end to split off */
- long remainder_size; /* its size */
+ INTERNAL_SIZE_T nb; /* padded request size */
+ char *m; /* memory returned by malloc call */
+ mchunkptr p; /* corresponding chunk */
+ char *brk; /* alignment point within p */
+ mchunkptr newp; /* chunk to return */
+ INTERNAL_SIZE_T newsize; /* its size */
+ INTERNAL_SIZE_T leadsize; /* leading space befor alignment point */
+ mchunkptr remainder; /* spare room at end to split off */
+ long remainder_size; /* its size */
- if ((long)bytes < 0) return 0;
+ if ((long) bytes < 0)
+ return 0;
- /* If need less alignment than we give anyway, just relay to malloc */
+ /* If need less alignment than we give anyway, just relay to malloc */
- if (alignment <= MALLOC_ALIGNMENT) return malloc(bytes);
+ if (alignment <= MALLOC_ALIGNMENT)
+ return malloc(bytes);
- /* Otherwise, ensure that it is at least a minimum chunk size */
+ /* Otherwise, ensure that it is at least a minimum chunk size */
- if (alignment < MINSIZE) alignment = MINSIZE;
+ if (alignment < MINSIZE)
+ alignment = MINSIZE;
- /* Call malloc with worst case padding to hit alignment. */
+ /* Call malloc with worst case padding to hit alignment. */
- nb = request2size(bytes);
- m = (char*)(malloc(nb + alignment + MINSIZE));
+ nb = request2size(bytes);
+ m = (char*)(malloc (nb + alignment + MINSIZE));
- if (m == 0) return 0; /* propagate failure */
+ if (m == 0)
+ return 0; /* propagate failure */
- p = mem2chunk(m);
+ p = mem2chunk(m);
- if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
- {
- }
- else /* misaligned */
- {
- /*
- Find an aligned spot inside chunk.
- Since we need to give back leading space in a chunk of at
- least MINSIZE, if the first calculation places us at
- a spot with less than MINSIZE leader, we can move to the
- next aligned spot -- we've allocated enough total room so that
- this is always possible.
- */
+ if ((((unsigned long)(m)) % alignment) == 0) { /* aligned */
+ } else { /* misaligned */
- brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
- if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
+ /*
+ Find an aligned spot inside chunk.
+ Since we need to give back leading space in a chunk of at
+ least MINSIZE, if the first calculation places us at
+ a spot with less than MINSIZE leader, we can move to the
+ next aligned spot -- we've allocated enough total room so that
+ this is always possible.
+ */
- newp = (mchunkptr)brk;
- leadsize = brk - (char*)(p);
- newsize = chunksize(p) - leadsize;
+ brk = (char*) mem2chunk(((unsigned long) (m + alignment - 1)) &
+ -((signed) alignment));
+ if ((long)(brk - (char*)(p)) < MINSIZE)
+ brk = brk + alignment;
+ newp = (mchunkptr)brk;
+ leadsize = brk - (char*)(p);
+ newsize = chunksize(p) - leadsize;
- /* give back leader, use the rest */
+ /* give back leader, use the rest */
- set_head(newp, newsize | PREV_INUSE);
- set_inuse_bit_at_offset(newp, newsize);
- set_head_size(p, leadsize);
- free(chunk2mem(p));
- p = newp;
- }
-
- /* Also give back spare room at the end */
+ set_head(newp, newsize | PREV_INUSE);
+ set_inuse_bit_at_offset(newp, newsize);
+ set_head_size(p, leadsize);
+ free(chunk2mem(p));
+ p = newp;
+ }
- remainder_size = chunksize(p) - nb;
+ /* Also give back spare room at the end */
- if (remainder_size >= (long)MINSIZE)
- {
- remainder = chunk_at_offset(p, nb);
- set_head(remainder, remainder_size | PREV_INUSE);
- set_head_size(p, nb);
- free(chunk2mem(remainder));
- }
+ remainder_size = chunksize(p) - nb;
- return chunk2mem(p);
+ if (remainder_size >= (long)MINSIZE) {
+ remainder = chunk_at_offset(p, nb);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_head_size(p, nb);
+ free (chunk2mem(remainder));
+ }
+ return chunk2mem(p);
}
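memalign() above over-allocates by alignment + MINSIZE and then picks the first aligned spot inside the returned chunk that still leaves at least MINSIZE of leading space to hand back. At its heart is the usual round-up-to-alignment trick, shown here on bare addresses (illustrative only; a power-of-two alignment is assumed, as in the mask arithmetic above):

#include <stdio.h>

/* Round addr up to the next multiple of align (align must be a power of two). */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long m = 0x1003;	/* pretend malloc() returned this address */

	printf("0x%lx -> 0x%lx (16-byte aligned)\n", m, align_up(m, 16));
	printf("0x%lx -> 0x%lx (4 KiB aligned)\n", m, align_up(m, 4096));
	return 0;
}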
-
-
-
/*
- valloc just invokes memalign with alignment argument equal
- to the page size of the system (or as near to this as can
- be figured out from all the includes/defines above.)
-*/
-
-void* valloc(size_t bytes)
+ * valloc just invokes memalign with alignment argument equal
+ * to the page size of the system (or as near to this as can
+ * be figured out from all the includes/defines above.)
+ */
+void *valloc(size_t bytes)
{
- return memalign (malloc_getpagesize, bytes);
+ return memalign(malloc_getpagesize, bytes);
}
/*
- pvalloc just invokes valloc for the nearest pagesize
- that will accommodate request
-*/
-
-
-void* pvalloc(size_t bytes)
+ * pvalloc just invokes valloc for the nearest pagesize
+ * that will accommodate request
+ */
+void *pvalloc (size_t bytes)
{
- size_t pagesize = malloc_getpagesize;
- return memalign (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
+ size_t pagesize = malloc_getpagesize;
+
+ return memalign(pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
/*
-
- calloc calls malloc, then zeroes out the allocated chunk.
-
-*/
-
-void* calloc(size_t n, size_t elem_size)
+ *
+ * calloc calls malloc, then zeroes out the allocated chunk.
+ *
+ */
+void *calloc(size_t n, size_t elem_size)
{
- mchunkptr p;
- INTERNAL_SIZE_T csz;
-
- INTERNAL_SIZE_T sz = n * elem_size;
-
-
- /* check if expand_top called, in which case don't need to clear */
- mchunkptr oldtop = top;
- INTERNAL_SIZE_T oldtopsize = chunksize(top);
+ mchunkptr p;
+ INTERNAL_SIZE_T csz;
+ INTERNAL_SIZE_T sz = n * elem_size;
- void* mem = malloc (sz);
+ /* check if expand_top called, in which case don't need to clear */
+ mchunkptr oldtop = top;
+ INTERNAL_SIZE_T oldtopsize = chunksize(top);
- if ((long)n < 0) return 0;
+ void *mem = malloc(sz);
- if (mem == 0)
- return 0;
- else
- {
- p = mem2chunk(mem);
+ if ((long)n < 0)
+ return 0;
- /* Two optional cases in which clearing not necessary */
+ if (mem == 0)
+ return 0;
+ else {
+ p = mem2chunk(mem);
+ /* Two optional cases in which clearing not necessary */
+ csz = chunksize(p);
+ if (p == oldtop && csz > oldtopsize) {
+ /* clear only the bytes from non-freshly-sbrked memory */
+ csz = oldtopsize;
+ }
- csz = chunksize(p);
-
- if (p == oldtop && csz > oldtopsize)
- {
- /* clear only the bytes from non-freshly-sbrked memory */
- csz = oldtopsize;
- }
-
- memset(mem, 0, csz - SIZE_SZ);
- return mem;
- }
+ memset(mem, 0, csz - SIZE_SZ);
+ return mem;
+ }
}
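Note that calloc() above computes sz = n * elem_size without an overflow check, which is typical of dlmalloc of this vintage; a multiplication that wraps would hand back a chunk smaller than the caller expects. If that ever mattered in this environment, a guard along the following lines could sit in front of the malloc() call (purely a suggestion, not part of this patch):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Return nonzero if n * elem_size would overflow a size_t. */
static int calloc_overflows(size_t n, size_t elem_size)
{
	return elem_size != 0 && n > SIZE_MAX / elem_size;
}

int main(void)
{
	printf("%d\n", calloc_overflows(4, 1024));		/* 0: fits      */
	printf("%d\n", calloc_overflows(SIZE_MAX / 2, 4));	/* 1: overflows */
	return 0;
}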
/*
-
- cfree just calls free. It is needed/defined on some systems
- that pair it with calloc, presumably for odd historical reasons.
-
-*/
-
+ *
+ * cfree just calls free. It is needed/defined on some systems
+ * that pair it with calloc, presumably for odd historical reasons.
+ */
#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
void cfree(void *mem)
{
- free(mem);
+ free(mem);
}
#endif
-
-
/*
-
Malloc_trim gives memory back to the system (via negative
arguments to sbrk) if there is unused memory at the `high' end of
the malloc pool. You can call this after freeing large blocks of
@@ -1971,137 +1853,126 @@ void cfree(void *mem)
from the system.
Malloc_trim returns 1 if it actually released any memory, else 0.
-
*/
#ifdef USE_MALLOC_TRIM
int malloc_trim(size_t pad)
{
- long top_size; /* Amount of top-most memory */
- long extra; /* Amount to release */
- char* current_brk; /* address returned by pre-check sbrk call */
- char* new_brk; /* address returned by negative sbrk call */
-
- unsigned long pagesz = malloc_getpagesize;
-
- top_size = chunksize(top);
- extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
-
- if (extra < (long)pagesz) /* Not enough memory to release */
- return 0;
-
- else
- {
- /* Test to make sure no one else called sbrk */
- current_brk = (char*)(sbrk (0));
- if (current_brk != (char*)(top) + top_size)
- return 0; /* Apparently we don't own memory; must fail */
-
- else
- {
- new_brk = (char*)(sbrk (-extra));
-
- if (new_brk == (char*)(NULL)) /* sbrk failed? */
- {
- /* Try to figure out what we have */
- current_brk = (char*)(sbrk (0));
- top_size = current_brk - (char*)top;
- if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
- {
- sbrked_mem = current_brk - sbrk_base;
- set_head(top, top_size | PREV_INUSE);
+ long top_size; /* Amount of top-most memory */
+ long extra; /* Amount to release */
+ char *current_brk; /* address returned by pre-check sbrk call */
+ char *new_brk; /* address returned by negative sbrk call */
+
+ unsigned long pagesz = malloc_getpagesize;
+
+ top_size = chunksize(top);
+ extra = ((top_size - pad - MINSIZE + (pagesz - 1)) / pagesz -
+ 1) * pagesz;
+
+ if (extra < (long)pagesz) /* Not enough memory to release */
+ return 0;
+
+ else {
+ /* Test to make sure no one else called sbrk */
+ current_brk = (char*)(sbrk(0));
+ if (current_brk != (char*)(top) + top_size)
+ return 0; /* Apparently we don't own memory; must fail */
+
+ else {
+ new_brk = (char *) (sbrk(-extra));
+
+ if (new_brk == (char*)(NULL)) { /* sbrk failed? */
+ /* Try to figure out what we have */
+ current_brk = (char*)(sbrk (0));
+ top_size = current_brk - (char*) top;
+ if (top_size >= (long)MINSIZE) { /* if not, we are very very dead! */
+ sbrked_mem = current_brk - sbrk_base;
+ set_head(top, top_size | PREV_INUSE);
+ }
+ return 0;
+ }
+
+ else {
+ /* Success. Adjust top accordingly. */
+ set_head(top, (top_size - extra) | PREV_INUSE);
+ sbrked_mem -= extra;
+ return 1;
+ }
+ }
}
- return 0;
- }
-
- else
- {
- /* Success. Adjust top accordingly. */
- set_head(top, (top_size - extra) | PREV_INUSE);
- sbrked_mem -= extra;
- return 1;
- }
- }
- }
}
#endif
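The amount malloc_trim() offers back is computed as whole pages above the requested pad, always keeping at least one page (plus MINSIZE and the pad) in the top chunk. A quick numeric check of that extra formula, lifted directly from the function above (page size 4096 matching the malloc_getpagesize define earlier in this diff, MINSIZE assumed to be 16):

#include <stdio.h>

#define MINSIZE 16L	/* assumed minimum chunk size */

/* Releasable bytes, exactly as computed in malloc_trim() above. */
static long trim_extra(long top_size, long pad, long pagesz)
{
	return ((top_size - pad - MINSIZE + (pagesz - 1)) / pagesz - 1) * pagesz;
}

int main(void)
{
	long pagesz = 4096;

	/* 20000 bytes of top and no pad: four whole pages can go back. */
	printf("%ld\n", trim_extra(20000, 0, pagesz));	/* 16384 */
	/* Less than two pages in top: nothing releasable, trim gives up. */
	printf("%ld\n", trim_extra(4000, 0, pagesz));	/* 0 */
	return 0;
}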
-
/*
- malloc_usable_size:
-
- This routine tells you how many bytes you can actually use in an
- allocated chunk, which may be more than you requested (although
- often not). You can use this many bytes without worrying about
- overwriting other allocated objects. Not a particularly great
- programming practice, but still sometimes useful.
-
-*/
-
-size_t malloc_usable_size(void* mem)
+ * malloc_usable_size:
+ *
+ * This routine tells you how many bytes you can actually use in an
+ * allocated chunk, which may be more than you requested (although
+ * often not). You can use this many bytes without worrying about
+ * overwriting other allocated objects. Not a particularly great
+ * programming practice, but still sometimes useful.
+ */
+size_t malloc_usable_size(void *mem)
{
- mchunkptr p;
- if (mem == 0)
- return 0;
- else
- {
- p = mem2chunk(mem);
- if(!chunk_is_mmapped(p))
- {
- if (!inuse(p)) return 0;
- return chunksize(p) - SIZE_SZ;
- }
- return chunksize(p) - 2*SIZE_SZ;
- }
+ mchunkptr p;
+
+ if (mem == 0)
+ return 0;
+ else {
+ p = mem2chunk(mem);
+ if (!chunk_is_mmapped(p)) {
+ if (!inuse(p))
+ return 0;
+ return chunksize(p) - SIZE_SZ;
+ }
+ return chunksize(p) - 2 * SIZE_SZ;
+ }
}
+
-
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
#ifdef CONFIG_CMD_MEMINFO
static void malloc_update_mallinfo(void)
{
- int i;
- mbinptr b;
- mchunkptr p;
+ int i;
+ mbinptr b;
+ mchunkptr p;
+
#ifdef DEBUG
- mchunkptr q;
+ mchunkptr q;
#endif
- INTERNAL_SIZE_T avail = chunksize(top);
- int navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
+ INTERNAL_SIZE_T avail = chunksize(top);
+ int navail = ((long)(avail) >= (long)MINSIZE) ? 1 : 0;
- for (i = 1; i < NAV; ++i)
- {
- b = bin_at(i);
- for (p = last(b); p != b; p = p->bk)
- {
+ for (i = 1; i < NAV; ++i) {
+ b = bin_at (i);
+ for (p = last(b); p != b; p = p->bk) {
#ifdef DEBUG
- for (q = next_chunk(p);
- q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
- q = next_chunk(q))
+ for (q = next_chunk(p);
+ q < top && inuse(q)
+ && (long) (chunksize(q)) >= (long)MINSIZE;
+ q = next_chunk(q))
#endif
- avail += chunksize(p);
- navail++;
- }
- }
-
- current_mallinfo.ordblks = navail;
- current_mallinfo.uordblks = sbrked_mem - avail;
- current_mallinfo.fordblks = avail;
+ avail += chunksize(p);
+ navail++;
+ }
+ }
+
+ current_mallinfo.ordblks = navail;
+ current_mallinfo.uordblks = sbrked_mem - avail;
+ current_mallinfo.fordblks = avail;
#if HAVE_MMAP
- current_mallinfo.hblks = n_mmaps;
+ current_mallinfo.hblks = n_mmaps;
#endif
- current_mallinfo.hblkhd = mmapped_mem;
- current_mallinfo.keepcost = chunksize(top);
+ current_mallinfo.hblkhd = mmapped_mem;
+ current_mallinfo.keepcost = chunksize(top);
}
-
-
/*
-
malloc_stats:
Prints on the amount of space obtain from the system (both
@@ -2112,33 +1983,26 @@ static void malloc_update_mallinfo(void)
freed. (Note that this is the number of bytes allocated, not the
number requested. It will be larger than the number requested
because of alignment and bookkeeping overhead.)
-
-*/
-
-
+ */
/*
- mallinfo returns a copy of updated current mallinfo.
-*/
-
-void malloc_stats()
+ * mallinfo returns a copy of updated current mallinfo.
+ */
+void malloc_stats(void)
{
- malloc_update_mallinfo();
- printf("max system bytes = %10u\n",
- (unsigned int)(max_total_mem));
- printf("system bytes = %10u\n",
- (unsigned int)(sbrked_mem + mmapped_mem));
- printf("in use bytes = %10u\n",
- (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
+ malloc_update_mallinfo();
+ printf("max system bytes = %10u\n", (unsigned int)(max_total_mem));
+ printf("system bytes = %10u\n",
+ (unsigned int)(sbrked_mem + mmapped_mem));
+ printf("in use bytes = %10u\n",
+ (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
- fprintf(stderr, "max mmap regions = %10u\n",
- (unsigned int)max_n_mmaps);
+ fprintf(stderr, "max mmap regions = %10u\n",
+ (unsigned int) max_n_mmaps);
#endif
}
#endif /* CONFIG_CMD_MEMINFO */
-
-
/*
mallopt:
@@ -2149,27 +2013,33 @@ void malloc_stats()
and returns 1 if successful else 0.
See descriptions of tunable parameters above.
-
*/
#ifndef __U_BOOT__
int mallopt(int param_number, int value)
{
- switch(param_number)
- {
- case M_TRIM_THRESHOLD:
- trim_threshold = value; return 1;
- case M_TOP_PAD:
- top_pad = value; return 1;
- case M_MMAP_THRESHOLD:
- mmap_threshold = value; return 1;
- case M_MMAP_MAX:
- if (value != 0) return 0; else n_mmaps_max = value; return 1;
-
- default:
- return 0;
- }
+ switch (param_number) {
+ case M_TRIM_THRESHOLD:
+ trim_threshold = value;
+ return 1;
+ case M_TOP_PAD:
+ top_pad = value;
+ return 1;
+ case M_MMAP_THRESHOLD:
+ mmap_threshold = value;
+ return 1;
+ case M_MMAP_MAX:
+ if (value != 0)
+ return 0;
+ else
+ n_mmaps_max = value;
+ return 1;
+
+ default:
+ return 0;
+ }
}
#endif
+
/*
History:
@@ -2281,4 +2151,3 @@ EXPORT_SYMBOL(malloc);
EXPORT_SYMBOL(calloc);
EXPORT_SYMBOL(free);
EXPORT_SYMBOL(realloc);
-