source: git/omalloc/dlmalloc.c @ 0d348b

1/*******************************************************************
2 *  File:    dlmalloc.c
3 *  Purpose: implementation of Doug Lea's malloc
4 *  This was obtained by cutting out the end of malloc.c
5 *
6 *  Version: $Id$
7 *******************************************************************/
8#ifdef HAVE_CONFIG_H
9#include "omMalloc.h"
10#else
11#include "dlmalloc.h"
12#endif
13
14/*
15  Emulation of sbrk for WIN32
16  All code within the ifdef WIN32 is untested by me.
17*/
18
19
20#ifdef WIN32
21
22#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
23~(malloc_getpagesize-1))
24
25/* reserve 64MB to ensure large contiguous space */
26#define RESERVED_SIZE (1024*1024*64)
27#define NEXT_SIZE (2048*1024)
28#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)
29
30struct GmListElement;
31typedef struct GmListElement GmListElement;
32
33struct GmListElement
34{
35        GmListElement* next;
36        void* base;
37};
38
39static GmListElement* head = 0;
40static unsigned int gNextAddress = 0;
41static unsigned int gAddressBase = 0;
42static unsigned int gAllocatedSize = 0;
43
44static
45GmListElement* makeGmListElement (void* bas)
46{
47        GmListElement* this;
48        this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
49        ASSERT (this);
50        if (this)
51        {
52                this->base = bas;
53                this->next = head;
54                head = this;
55        }
56        return this;
57}
58
59void gcleanup ()
60{
61        BOOL rval;
62        ASSERT ( (head == NULL) || (head->base == (void*)gAddressBase));
63        if (gAddressBase && (gNextAddress - gAddressBase))
64        {
65                rval = VirtualFree ((void*)gAddressBase,
66                                                        gNextAddress - gAddressBase,
67                                                        MEM_DECOMMIT);
68        ASSERT (rval);
69        }
70        while (head)
71        {
72                GmListElement* next = head->next;
73                rval = VirtualFree (head->base, 0, MEM_RELEASE);
74                ASSERT (rval);
75                LocalFree (head);
76                head = next;
77        }
78}
79               
80static
81void* findRegion (void* start_address, unsigned long size)
82{
83        MEMORY_BASIC_INFORMATION info;
84        while ((unsigned long)start_address < TOP_MEMORY)
85        {
86                VirtualQuery (start_address, &info, sizeof (info));
87                if (info.State != MEM_FREE)
88                        start_address = (char*)info.BaseAddress + info.RegionSize;
89                else if (info.RegionSize >= size)
90                        return start_address;
91                else
92                        start_address = (char*)info.BaseAddress + info.RegionSize;
93        }
94        return NULL;
95       
96}
97
98
99void* wsbrk (long size)
100{
101        void* tmp;
102        if (size > 0)
103        {
104                if (gAddressBase == 0)
105                {
106                        gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
107                        gNextAddress = gAddressBase =
108                                (unsigned int)VirtualAlloc (NULL, gAllocatedSize,
109                                                                                        MEM_RESERVE, PAGE_NOACCESS);
110                } else if (AlignPage (gNextAddress + size) > (gAddressBase +
111gAllocatedSize))
112                {
113                        long new_size = max (NEXT_SIZE, AlignPage (size));
114                        void* new_address = (void*)(gAddressBase+gAllocatedSize);
115                        do
116                        {
117                                new_address = findRegion (new_address, new_size);
118                               
119                                if (new_address == 0)
120                                        return (void*)-1;
121
122                                gAddressBase = gNextAddress =
123                                        (unsigned int)VirtualAlloc (new_address, new_size,
124                                                                                                MEM_RESERVE, PAGE_NOACCESS);
125                                // repeat in case of race condition
126                                // The region that we found has been snagged
127                                // by another thread
128                        }
129                        while (gAddressBase == 0);
130
131                        ASSERT (new_address == (void*)gAddressBase);
132
133                        gAllocatedSize = new_size;
134
135                        if (!makeGmListElement ((void*)gAddressBase))
136                                return (void*)-1;
137                }
138                if ((size + gNextAddress) > AlignPage (gNextAddress))
139                {
140                        void* res;
141                        res = VirtualAlloc ((void*)AlignPage (gNextAddress),
142                                                                (size + gNextAddress -
143                                                                 AlignPage (gNextAddress)),
144                                                                MEM_COMMIT, PAGE_READWRITE);
145                        if (res == 0)
146                                return (void*)-1;
147                }
148                tmp = (void*)gNextAddress;
149                gNextAddress = (unsigned int)tmp + size;
150                return tmp;
151        }
152        else if (size < 0)
153        {
154                unsigned int alignedGoal = AlignPage (gNextAddress + size);
155                /* Trim by releasing the virtual memory */
156                if (alignedGoal >= gAddressBase)
157                {
158                        VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
159                                                 MEM_DECOMMIT);
160                        gNextAddress = gNextAddress + size;
161                        return (void*)gNextAddress;
162                }
163                else
164                {
165                        VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
166                                                 MEM_DECOMMIT);
167                        gNextAddress = gAddressBase;
168                        return (void*)-1;
169                }
170        }
171        else
172        {
173                return (void*)gNextAddress;
174        }
175}
176
177#endif
178
179
180
181/*
182  Type declarations
183*/
184
185
186struct malloc_chunk
187{
188  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
189  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
190  struct malloc_chunk* fd;   /* double links -- used only if free. */
191  struct malloc_chunk* bk;
192};
193
194typedef struct malloc_chunk* mchunkptr;
195
196/*
197
198   malloc_chunk details:
199
200    (The following includes lightly edited explanations by Colin Plumb.)
201
202    Chunks of memory are maintained using a `boundary tag' method as
203    described in e.g., Knuth or Standish.  (See the paper by Paul
204    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
205    survey of such techniques.)  Sizes of free chunks are stored both
206    in the front of each chunk and at the end.  This makes
207    consolidating fragmented chunks into bigger chunks very fast.  The
208    size fields also hold bits representing whether chunks are free or
209    in use.
210
211    An allocated chunk looks like this:
212
213
214    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
215            |             Size of previous chunk, if allocated            | |
216            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
217            |             Size of chunk, in bytes                         |P|
218      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
219            |             User data starts here...                          .
220            .                                                               .
221            .             (malloc_usable_space() bytes)                     .
222            .                                                               |
223nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
224            |             Size of chunk                                     |
225            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
226
227
228    Where "chunk" is the front of the chunk for the purpose of most of
229    the malloc code, but "mem" is the pointer that is returned to the
230    user.  "Nextchunk" is the beginning of the next contiguous chunk.
231
232    Chunks always begin on even word boundaries, so the mem portion
233    (which is returned to the user) is also on an even word boundary, and
234    thus double-word aligned.
235
236    Free chunks are stored in circular doubly-linked lists, and look like this:
237
238    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
239            |             Size of previous chunk                            |
240            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
241    `head:' |             Size of chunk, in bytes                         |P|
242      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
243            |             Forward pointer to next chunk in list             |
244            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
245            |             Back pointer to previous chunk in list            |
246            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
247            |             Unused space (may be 0 bytes long)                .
248            .                                                               .
249            .                                                               |
250nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
251    `foot:' |             Size of chunk, in bytes                           |
252            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
253
254    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
255    chunk size (which is always a multiple of two words), is an in-use
256    bit for the *previous* chunk.  If that bit is *clear*, then the
257    word before the current chunk size contains the previous chunk
258    size, and can be used to find the front of the previous chunk.
259    (The very first chunk allocated always has this bit set,
260    preventing access to non-existent (or non-owned) memory.)
261
262    Note that the `foot' of the current chunk is actually represented
263    as the prev_size of the NEXT chunk. (This makes it easier to
264    deal with alignments etc).
265
266    The two exceptions to all this are
267
268     1. The special chunk `top', which doesn't bother using the
269        trailing size field since there is no
270        next contiguous chunk that would have to index off it. (After
271        initialization, `top' is forced to always exist.  If it would
272        become less than MINSIZE bytes long, it is replenished via
273        malloc_extend_top.)
274
275     2. Chunks allocated via mmap, which have the second-lowest-order
276        bit (IS_MMAPPED) set in their size fields.  Because they are
277        never merged or traversed from any other chunk, they have no
278        foot size or inuse information.
279
280    Available chunks are kept in any of several places (all declared below):
281
282    * `av': An array of chunks serving as bin headers for consolidated
283       chunks. Each bin is doubly linked.  The bins are approximately
284       proportionally (log) spaced.  There are a lot of these bins
285       (128). This may look excessive, but works very well in
286       practice.  All procedures maintain the invariant that no
287       consolidated chunk physically borders another one. Chunks in
288       bins are kept in size order, with ties going to the
289       approximately least recently used chunk.
290
291       The chunks in each bin are maintained in decreasing sorted order by
292       size.  This is irrelevant for the small bins, which all contain
293       the same-sized chunks, but facilitates best-fit allocation for
294       larger chunks. (These lists are just sequential. Keeping them in
295       order almost never requires enough traversal to warrant using
296       fancier ordered data structures.)  Chunks of the same size are
297       linked with the most recently freed at the front, and allocations
298       are taken from the back.  This results in LRU or FIFO allocation
299       order, which tends to give each chunk an equal opportunity to be
300       consolidated with adjacent freed chunks, resulting in larger free
301       chunks and less fragmentation.
302
303    * `top': The top-most available chunk (i.e., the one bordering the
304       end of available memory) is treated specially. It is never
305       included in any bin, is used only if no other chunk is
306       available, and is released back to the system if it is very
307       large (see M_TRIM_THRESHOLD).
308
309    * `last_remainder': A bin holding only the remainder of the
310       most recently split (non-top) chunk. This bin is checked
311       before other non-fitting chunks, so as to provide better
312       locality for runs of sequentially allocated chunks.
313
314    *  Implicitly, through the host system's memory mapping tables.
315       If supported, requests greater than a threshold are usually
316       serviced via calls to mmap, and then later released via munmap.
317
318*/
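/*
   Illustrative sketch (not part of the original source; the helper name
   walk_chunk_example is hypothetical): how the boundary-tag layout
   pictured above is navigated by hand -- recovering the chunk header
   from a user pointer, reading the size and PREV_INUSE bit, and stepping
   to the neighbouring physical chunks.  It relies only on
   struct malloc_chunk and INTERNAL_SIZE_T; the production macros that do
   the same (chunk2mem, next_chunk, prev_chunk, chunksize, ...) are
   defined further below.
*/
#if 0
static void walk_chunk_example(Void_t* mem)
{
  /* the chunk header starts two size fields before the user pointer */
  mchunkptr p = (mchunkptr)((char*)mem - 2*sizeof(INTERNAL_SIZE_T));

  INTERNAL_SIZE_T raw = p->size;
  INTERNAL_SIZE_T sz  = raw & ~(INTERNAL_SIZE_T)0x3; /* strip PREV_INUSE/IS_MMAPPED */

  /* the next contiguous chunk begins `sz' bytes after this header */
  mchunkptr nxt = (mchunkptr)((char*)p + sz);

  /* prev_size is only valid while the previous chunk is free, i.e.
     while the PREV_INUSE bit of this chunk is clear */
  if (!(raw & 0x1))
  {
    mchunkptr prv = (mchunkptr)((char*)p - p->prev_size);
    (void)prv;
  }
  (void)nxt;
}
#endif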
319
320
321
322
323
324
325/*  sizes, alignments */
326
327#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
328#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
329#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
330#define MINSIZE                (sizeof(struct malloc_chunk))
331
332/* conversion from malloc headers to user pointers, and back */
333
334#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
335#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
336
337/* pad request bytes into a usable size */
338
339#define request2size(req) \
340 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
341  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
342   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
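/*
   Worked example (illustrative, not in the original source; assumes a
   32-bit build where SIZE_SZ == 4, hence MALLOC_ALIGNMENT == 8 and
   MINSIZE == 16):

     request2size(1)   -> 16    (rounded up to MINSIZE)
     request2size(13)  -> 24    ((13 + 4 + 7) & ~7)
     request2size(100) -> 104   ((100 + 4 + 7) & ~7)
*/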
343
344/* Check if m has acceptable alignment */
345
346#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
347
348
349
350
351/*
352  Physical chunk operations
353*/
354
355
356/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
357
358#define PREV_INUSE 0x1
359
360/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
361
362#define IS_MMAPPED 0x2
363
364/* Bits to mask off when extracting size */
365
366#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
367
368
369/* Ptr to next physical malloc_chunk. */
370
371#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
372
373/* Ptr to previous physical malloc_chunk */
374
375#define prev_chunk(p)\
376   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
377
378
379/* Treat space at ptr + offset as a chunk */
380
381#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
382
383
384
385
386/*
387  Dealing with use bits
388*/
389
390/* extract p's inuse bit */
391
392#define inuse(p)\
393((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
394
395/* extract inuse bit of previous chunk */
396
397#define prev_inuse(p)  ((p)->size & PREV_INUSE)
398
399/* check for mmap()'ed chunk */
400
401#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
402
403/* set/clear chunk as in use without otherwise disturbing */
404
405#define set_inuse(p)\
406((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
407
408#define clear_inuse(p)\
409((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
410
411/* check/set/clear inuse bits in known places */
412
413#define inuse_bit_at_offset(p, s)\
414 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
415
416#define set_inuse_bit_at_offset(p, s)\
417 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
418
419#define clear_inuse_bit_at_offset(p, s)\
420 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
421
422
423
424
425/*
426  Dealing with size fields
427*/
428
429/* Get size, ignoring use bits */
430
431#define chunksize(p)          ((p)->size & ~(SIZE_BITS))
432
433/* Set size at head, without disturbing its use bit */
434
435#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))
436
437/* Set size/use ignoring previous bits in header */
438
439#define set_head(p, s)        ((p)->size = (s))
440
441/* Set size at footer (only when chunk is not in use) */
442
443#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
444
445
446
447
448
449/*
450   Bins
451
452    The bins, `av_' are an array of pairs of pointers serving as the
453    heads of (initially empty) doubly-linked lists of chunks, laid out
454    in a way so that each pair can be treated as if it were in a
455    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
456    and chunks are the same).
457
458    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
459    8 bytes apart. Larger bins are approximately logarithmically
460    spaced. (See the table below.) The `av_' array is never mentioned
461    directly in the code, but instead via bin access macros.
462
463    Bin layout:
464
465    64 bins of size       8
466    32 bins of size      64
467    16 bins of size     512
468     8 bins of size    4096
469     4 bins of size   32768
470     2 bins of size  262144
471     1 bin  of size what's left
472
473    There is actually a little bit of slop in the numbers in bin_index
474    for the sake of speed. This makes no difference elsewhere.
475
476    The special chunks `top' and `last_remainder' get their own bins,
477    (this is implemented via yet more trickery with the av_ array),
478    although `top' is never properly linked to its bin since it is
479    always handled specially.
480
481*/
482
483#define NAV             128   /* number of bins */
484
485typedef struct malloc_chunk* mbinptr;
486
487/* access macros */
488
489#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
490#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
491#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
492
493/*
494   The first 2 bins are never indexed. The corresponding av_ cells are instead
495   used for bookkeeping. This is not to save space, but to simplify
496   indexing, maintain locality, and avoid some initialization tests.
497*/
498
499#define top            (bin_at(0)->fd)   /* The topmost chunk */
500#define last_remainder (bin_at(1))       /* remainder from last split */
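/*
   Layout note (illustrative): bin_at(i) points 2*SIZE_SZ bytes before
   av_[2*i + 2], so bin_at(i)->fd is av_[2*i + 2] and bin_at(i)->bk is
   av_[2*i + 3] -- each consecutive pair of av_ cells serves as the fd/bk
   fields of a fake chunk header.  Hence `top' above is simply the cell
   av_[2], and binblocks (defined below) reuses what would be the `size'
   field of bin 0.
*/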
501
502
503/*
504   Because top initially points to its own bin with initial
505   zero size, thus forcing extension on the first malloc request,
506   we avoid having any special code in malloc to check whether
507   it even exists yet. But we still need to check for it in malloc_extend_top.
508*/
509
510#define initial_top    ((mchunkptr)(bin_at(0)))
511
512/* Helper macro to initialize bins */
513
514#define IAV(i)  bin_at(i), bin_at(i)
515
516static mbinptr av_[NAV * 2 + 2] = {
517 0, 0,
518 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
519 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
520 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
521 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
522 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
523 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
524 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
525 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
526 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
527 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
528 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
529 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
530 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
531 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
532 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
533 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
534};
535
536
537
538/* field-extraction macros */
539
540#define first(b) ((b)->fd)
541#define last(b)  ((b)->bk)
542
543/*
544  Indexing into bins
545*/
546
547#define bin_index(sz)                                                          \
548(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
549 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
550 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
551 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
552 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
553 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
554                                          126)
555/*
556  bins for chunks < 512 are all spaced 8 bytes apart, and hold
557  identically sized chunks. This is exploited in malloc.
558*/
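/*
   Illustrative values of bin_index (not in the original source):

     bin_index(40)    ->   5    (40 >> 3; a small bin, spaced 8 apart)
     bin_index(600)   ->  65    (56  + (600   >>  6))
     bin_index(5000)  -> 100    (91  + (5000  >>  9))
     bin_index(40000) -> 119    (110 + (40000 >> 12))
*/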
559
560#define MAX_SMALLBIN         63
561#define MAX_SMALLBIN_SIZE   512
562#define SMALLBIN_WIDTH        8
563
564#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)
565
566/*
567   Requests are `small' if both the corresponding and the next bin are small
568*/
569
570#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
571
572
573
574/*
575    To help compensate for the large number of bins, a one-level index
576    structure is used for bin-by-bin searching.  `binblocks' is a
577    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
578    have any (possibly) non-empty bins, so they can be skipped over
579    all at once during traversals. The bits are NOT always
580    cleared as soon as all bins in a block are empty, but instead only
581    when all are noticed to be empty during traversal in malloc.
582*/
583
584#define BINBLOCKWIDTH     4   /* bins per block */
585
586#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */
587
588/* bin<->block macros */
589
590#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
591#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
592#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
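/*
   Example (illustrative): with BINBLOCKWIDTH == 4, bin index 37 lies in
   block 37/4 == 9, so idx2binblock(37) == (1 << 9) and mark_binblock(37)
   sets bit 9 of the `binblocks' bitvector; clear_binblock(37) clears it
   again once malloc notices that all four bins of that block are empty.
*/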
593
594
595
596
597
598/*  Other static bookkeeping data */
599
600/* variables holding tunable values */
601
602static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
603static unsigned long top_pad          = DEFAULT_TOP_PAD;
604static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
605static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
606
607/* The first value returned from sbrk */
608static char* sbrk_base = (char*)(-1);
609
610/* The maximum memory obtained from system via sbrk */
611unsigned long max_sbrked_mem = 0;
612
613/* The maximum via either sbrk or mmap */
614unsigned long max_total_mem = 0;
615
616/* internal working copy of mallinfo */
617struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
618
619/* The total memory obtained from system via sbrk */
620#define sbrked_mem  (current_mallinfo.arena)
621
622/* Tracking mmaps */
623
624static unsigned int n_mmaps = 0;
625static unsigned int max_n_mmaps = 0;
626/* obachman: rm'ed static */
627unsigned long mmapped_mem = 0;
628unsigned long max_mmapped_mem = 0;
629
630
631
632/*
633  Debugging support
634*/
635
636#if DEBUG
637
638
639/*
640  These routines make a number of assertions about the states
641  of data structures that should be true at all times. If any
642  are not true, it's very likely that a user program has somehow
643  trashed memory. (It's also possible that there is a coding error
644  in malloc. In which case, please report it!)
645*/
646
647#if __STD_C
648static void do_check_chunk(mchunkptr p)
649#else
650static void do_check_chunk(p) mchunkptr p;
651#endif
652{
653  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
654
655  /* No checkable chunk is mmapped */
656  assert(!chunk_is_mmapped(p));
657
658  /* Check for legal address ... */
659  assert((char*)p >= sbrk_base);
660  if (p != top)
661    assert((char*)p + sz <= (char*)top);
662  else
663    assert((char*)p + sz <= sbrk_base + sbrked_mem);
664
665}
666
667
668#if __STD_C
669static void do_check_free_chunk(mchunkptr p)
670#else
671static void do_check_free_chunk(p) mchunkptr p;
672#endif
673{
674  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
675  mchunkptr next = chunk_at_offset(p, sz);
676
677  do_check_chunk(p);
678
679  /* Check whether it claims to be free ... */
680  assert(!inuse(p));
681
682  /* Unless a special marker, must have OK fields */
683  if ((long)sz >= (long)MINSIZE)
684  {
685    assert((sz & MALLOC_ALIGN_MASK) == 0);
686    assert(aligned_OK(chunk2mem(p)));
687    /* ... matching footer field */
688    assert(next->prev_size == sz);
689    /* ... and is fully consolidated */
690    assert(prev_inuse(p));
691    assert (next == top || inuse(next));
692
693    /* ... and has minimally sane links */
694    assert(p->fd->bk == p);
695    assert(p->bk->fd == p);
696  }
697  else /* markers are always of size SIZE_SZ */
698    assert(sz == SIZE_SZ);
699}
700
701#if __STD_C
702static void do_check_inuse_chunk(mchunkptr p)
703#else
704static void do_check_inuse_chunk(p) mchunkptr p;
705#endif
706{
707  mchunkptr next = next_chunk(p);
708  do_check_chunk(p);
709
710  /* Check whether it claims to be in use ... */
711  assert(inuse(p));
712
713  /* ... and is surrounded by OK chunks.
714    Since more things can be checked with free chunks than inuse ones,
715    if an inuse chunk borders them and debug is on, it's worth doing them.
716  */
717  if (!prev_inuse(p))
718  {
719    mchunkptr prv = prev_chunk(p);
720    assert(next_chunk(prv) == p);
721    do_check_free_chunk(prv);
722  }
723  if (next == top)
724  {
725    assert(prev_inuse(next));
726    assert(chunksize(next) >= MINSIZE);
727  }
728  else if (!inuse(next))
729    do_check_free_chunk(next);
730
731}
732
733#if __STD_C
734static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
735#else
736static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
737#endif
738{
739  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
740  long room = sz - s;
741
742  do_check_inuse_chunk(p);
743
744  /* Legal size ... */
745  assert((long)sz >= (long)MINSIZE);
746  assert((sz & MALLOC_ALIGN_MASK) == 0);
747  assert(room >= 0);
748  assert(room < (long)MINSIZE);
749
750  /* ... and alignment */
751  assert(aligned_OK(chunk2mem(p)));
752
753
754  /* ... and was allocated at front of an available chunk */
755  assert(prev_inuse(p));
756
757}
758
759
760#define check_free_chunk(P)  do_check_free_chunk(P)
761#define check_inuse_chunk(P) do_check_inuse_chunk(P)
762#define check_chunk(P) do_check_chunk(P)
763#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
764#else
765#define check_free_chunk(P)
766#define check_inuse_chunk(P)
767#define check_chunk(P)
768#define check_malloced_chunk(P,N)
769#endif
770
771
772
773/*
774  Macro-based internal utilities
775*/
776
777
778/*
779  Linking chunks in bin lists.
780  Call these only with variables, not arbitrary expressions, as arguments.
781*/
782
783/*
784  Place chunk p of size s in its bin, in size order,
785  putting it ahead of others of same size.
786*/
787
788
789#define frontlink(P, S, IDX, BK, FD)                                          \
790{                                                                             \
791  if (S < MAX_SMALLBIN_SIZE)                                                  \
792  {                                                                           \
793    IDX = smallbin_index(S);                                                  \
794    mark_binblock(IDX);                                                       \
795    BK = bin_at(IDX);                                                         \
796    FD = BK->fd;                                                              \
797    P->bk = BK;                                                               \
798    P->fd = FD;                                                               \
799    FD->bk = BK->fd = P;                                                      \
800  }                                                                           \
801  else                                                                        \
802  {                                                                           \
803    IDX = bin_index(S);                                                       \
804    BK = bin_at(IDX);                                                         \
805    FD = BK->fd;                                                              \
806    if (FD == BK) mark_binblock(IDX);                                         \
807    else                                                                      \
808    {                                                                         \
809      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
810      BK = FD->bk;                                                            \
811    }                                                                         \
812    P->bk = BK;                                                               \
813    P->fd = FD;                                                               \
814    FD->bk = BK->fd = P;                                                      \
815  }                                                                           \
816}
817
818
819/* take a chunk off a list */
820
821#define unlink(P, BK, FD)                                                     \
822{                                                                             \
823  BK = P->bk;                                                                 \
824  FD = P->fd;                                                                 \
825  FD->bk = BK;                                                                \
826  BK->fd = FD;                                                                \
827}                                                                             \
828
829/* Place p as the last remainder */
830
831#define link_last_remainder(P)                                                \
832{                                                                             \
833  last_remainder->fd = last_remainder->bk =  P;                               \
834  P->fd = P->bk = last_remainder;                                             \
835}
836
837/* Clear the last_remainder bin */
838
839#define clear_last_remainder \
840  (last_remainder->fd = last_remainder->bk = last_remainder)
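/*
   Usage sketch (illustrative, not in the original source): the linking
   macros expand their arguments several times, which is why the section
   comment above insists on plain variables.  A typical call site,
   assuming p already points at a free chunk of size sz, looks like:
*/
#if 0
  {
    mchunkptr p;          /* the free chunk being placed or removed      */
    INTERNAL_SIZE_T sz;   /* its chunksize()                             */
    int idx;              /* bin index, filled in by frontlink           */
    mchunkptr bck, fwd;   /* scratch variables required by the macros    */

    frontlink(p, sz, idx, bck, fwd);   /* insert p into its size bin     */
    unlink(p, bck, fwd);               /* and take it off the list again */
  }
#endif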
841
842
843
844
845
846
847/* Routines dealing with mmap(). */
848
849#if HAVE_MMAP
850
851#if __STD_C
852static mchunkptr mmap_chunk(size_t size)
853#else
854static mchunkptr mmap_chunk(size) size_t size;
855#endif
856{
857  size_t page_mask = malloc_getpagesize - 1;
858  mchunkptr p;
859
860#ifndef MAP_ANONYMOUS
861  static int fd = -1;
862#endif
863
864  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
865
866  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
867   * there is no following chunk whose prev_size field could be used.
868   */
869  size = (size + SIZE_SZ + page_mask) & ~page_mask;
870
871#ifdef MAP_ANONYMOUS
872  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
873                      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
874#else /* !MAP_ANONYMOUS */
875  if (fd < 0)
876  {
877    fd = open("/dev/zero", O_RDWR);
878    if(fd < 0) return 0;
879  }
880  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
881#endif
882
883  if(p == (mchunkptr)-1) return 0;
884
885  n_mmaps++;
886  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
887
888  /* We demand that eight bytes into a page must be 8-byte aligned. */
889  assert(aligned_OK(chunk2mem(p)));
890
891  /* The offset to the start of the mmapped region is stored
892   * in the prev_size field of the chunk; normally it is zero,
893   * but that can be changed in memalign().
894   */
895  p->prev_size = 0;
896  set_head(p, size|IS_MMAPPED);
897
898  mmapped_mem += size;
899  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
900    max_mmapped_mem = mmapped_mem;
901  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
902    max_total_mem = mmapped_mem + sbrked_mem;
903  return p;
904}
905
906#if __STD_C
907static void munmap_chunk(mchunkptr p)
908#else
909static void munmap_chunk(p) mchunkptr p;
910#endif
911{
912  INTERNAL_SIZE_T size = chunksize(p);
913  int ret;
914
915  assert (chunk_is_mmapped(p));
916  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
917  assert((n_mmaps > 0));
918  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
919
920  n_mmaps--;
921  mmapped_mem -= (size + p->prev_size);
922
923  ret = munmap((char *)p - p->prev_size, size + p->prev_size);
924
925  /* munmap returns non-zero on failure */
926  assert(ret == 0);
927}
928
929#if HAVE_MREMAP
930
931#if __STD_C
932static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
933#else
934static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
935#endif
936{
937  size_t page_mask = malloc_getpagesize - 1;
938  INTERNAL_SIZE_T offset = p->prev_size;
939  INTERNAL_SIZE_T size = chunksize(p);
940  char *cp;
941
942  assert (chunk_is_mmapped(p));
943  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
944  assert((n_mmaps > 0));
945  assert(((size + offset) & (malloc_getpagesize-1)) == 0);
946
947  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
948  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
949
950  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
951
952  if (cp == (char *)-1) return 0;
953
954  p = (mchunkptr)(cp + offset);
955
956  assert(aligned_OK(chunk2mem(p)));
957
958  assert((p->prev_size == offset));
959  set_head(p, (new_size - offset)|IS_MMAPPED);
960
961  mmapped_mem -= size + offset;
962  mmapped_mem += new_size;
963  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
964    max_mmapped_mem = mmapped_mem;
965  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
966    max_total_mem = mmapped_mem + sbrked_mem;
967  return p;
968}
969
970#endif /* HAVE_MREMAP */
971
972#endif /* HAVE_MMAP */
973
974
975
976
977/*
978  Extend the top-most chunk by obtaining memory from system.
979  Main interface to sbrk (but see also malloc_trim).
980*/
981
982#if __STD_C
983static void malloc_extend_top(INTERNAL_SIZE_T nb)
984#else
985static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
986#endif
987{
988  char*     brk;                  /* return value from sbrk */
989  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
990  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
991  char*     new_brk;              /* return of 2nd sbrk call */
992  INTERNAL_SIZE_T top_size;       /* new size of top chunk */
993
994  mchunkptr old_top     = top;  /* Record state of old top */
995  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
996  char*     old_end      = (char*)(chunk_at_offset(old_top, old_top_size));
997
998  /* Pad request with top_pad plus minimal overhead */
999
1000  INTERNAL_SIZE_T    sbrk_size     = nb + top_pad + MINSIZE;
1001  unsigned long pagesz    = malloc_getpagesize;
1002
1003  /* If not the first time through, round to preserve page boundary */
1004  /* Otherwise, we need to correct to a page size below anyway. */
1005  /* (We also correct below if there was an intervening foreign sbrk call.) */
1006
1007  if (sbrk_base != (char*)(-1))
1008    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1009
1010  brk = (char*)(MORECORE (sbrk_size));
1011
1012  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
1013  if (brk == (char*)(MORECORE_FAILURE) ||
1014      (brk < old_end && old_top != initial_top))
1015    return;
1016
1017  sbrked_mem += sbrk_size;
1018
1019  if (brk == old_end) /* can just add bytes to current top */
1020  {
1021    top_size = sbrk_size + old_top_size;
1022    set_head(top, top_size | PREV_INUSE);
1023  }
1024  else
1025  {
1026    if (sbrk_base == (char*)(-1))  /* First time through. Record base */
1027      sbrk_base = brk;
1028    else  /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
1029      sbrked_mem += brk - (char*)old_end;
1030
1031    /* Guarantee alignment of first new chunk made from this space */
1032    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1033    if (front_misalign > 0)
1034    {
1035      correction = (MALLOC_ALIGNMENT) - front_misalign;
1036      brk += correction;
1037    }
1038    else
1039      correction = 0;
1040
1041    /* Guarantee the next brk will be at a page boundary */
1042    correction += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
1043
1044    /* Allocate correction */
1045    new_brk = (char*)(MORECORE (correction));
1046    if (new_brk == (char*)(MORECORE_FAILURE)) return;
1047
1048    sbrked_mem += correction;
1049
1050    top = (mchunkptr)brk;
1051    top_size = new_brk - brk + correction;
1052    set_head(top, top_size | PREV_INUSE);
1053
1054    if (old_top != initial_top)
1055    {
1056
1057      /* There must have been an intervening foreign sbrk call. */
1058      /* A double fencepost is necessary to prevent consolidation */
1059
1060      /* If not enough space to do this, then user did something very wrong */
1061      if (old_top_size < MINSIZE)
1062      {
1063        set_head(top, PREV_INUSE); /* will force null return from malloc */
1064        return;
1065      }
1066
1067      /* Also keep size a multiple of MALLOC_ALIGNMENT */
1068      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1069      set_head_size(old_top, old_top_size);
1070      chunk_at_offset(old_top, old_top_size          )->size =
1071        SIZE_SZ|PREV_INUSE;
1072      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
1073        SIZE_SZ|PREV_INUSE;
1074      /* If possible, release the rest. */
1075      if (old_top_size >= MINSIZE)
1076        fREe(chunk2mem(old_top));
1077    }
1078  }
1079
1080  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
1081    max_sbrked_mem = sbrked_mem;
1082  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1083    max_total_mem = mmapped_mem + sbrked_mem;
1084
1085  /* We always land on a page boundary */
1086  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
1087}
1088
1089
1090
1091
1092/* Main public routines */
1093
1094
1095/*
1096  Malloc Algorithm:
1097
1098    The requested size is first converted into a usable form, `nb'.
1099    This currently means to add 4 bytes overhead plus possibly more to
1100    obtain 8-byte alignment and/or to obtain a size of at least
1101    MINSIZE (currently 16 bytes), the smallest allocatable size.
1102    (All fits are considered `exact' if they are within MINSIZE bytes.)
1103
1104    From there, the first of the following steps that succeeds is taken:
1105
1106      1. The bin corresponding to the request size is scanned, and if
1107         a chunk of exactly the right size is found, it is taken.
1108
1109      2. The most recently remaindered chunk is used if it is big
1110         enough.  This is a form of (roving) first fit, used only in
1111         the absence of exact fits. Runs of consecutive requests use
1112         the remainder of the chunk used for the previous such request
1113         whenever possible. This limited use of a first-fit style
1114         allocation strategy tends to give contiguous chunks
1115         coextensive lifetimes, which improves locality and can reduce
1116         fragmentation in the long run.
1117
1118      3. Other bins are scanned in increasing size order, using a
1119         chunk big enough to fulfill the request, and splitting off
1120         any remainder.  This search is strictly by best-fit; i.e.,
1121         the smallest (with ties going to approximately the least
1122         recently used) chunk that fits is selected.
1123
1124      4. If large enough, the chunk bordering the end of memory
1125         (`top') is split off. (This use of `top' is in accord with
1126         the best-fit search rule.  In effect, `top' is treated as
1127         larger (and thus less well fitting) than any other available
1128         chunk since it can be extended to be as large as necessary
1129         (up to system limitations).)
1130
1131      5. If the request size meets the mmap threshold and the
1132         system supports mmap, and there are few enough currently
1133         allocated mmapped regions, and a call to mmap succeeds,
1134         the request is allocated via direct memory mapping.
1135
1136      6. Otherwise, the top of memory is extended by
1137         obtaining more space from the system (normally using sbrk,
1138         but definable to anything else via the MORECORE macro).
1139         Memory is gathered from the system (in system page-sized
1140         units) in a way that allows chunks obtained across different
1141         sbrk calls to be consolidated, but does not require
1142         contiguous memory. Thus, it should be safe to intersperse
1143         mallocs with other sbrk calls.
1144
1145
1146      All allocations are made from the `lowest' part of any found
1147      chunk. (The implementation invariant is that prev_inuse is
1148      always true of any allocated chunk; i.e., that each allocated
1149      chunk borders either a previously allocated and still in-use chunk,
1150      or the base of its memory arena.)
1151
1152*/
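/*
   Worked trace (illustrative, assuming a 32-bit build with SIZE_SZ == 4):
   for malloc(100) the padded size is nb = request2size(100) = 104.
   Since 104 < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH the request is `small',
   so step 1 looks in smallbin_index(104) == bin 13 (chunks of exactly
   104 bytes) and then in bin 14 (112 bytes, whose remainder would be
   below MINSIZE).  Only if both are empty do steps 2-6 come into play.
*/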
1153
1154#if __STD_C
1155Void_t* mALLOc(size_t bytes)
1156#else
1157Void_t* mALLOc(bytes) size_t bytes;
1158#endif
1159{
1160  mchunkptr victim;                  /* inspected/selected chunk */
1161  INTERNAL_SIZE_T victim_size;       /* its size */
1162  int       idx;                     /* index for bin traversal */
1163  mbinptr   bin;                     /* associated bin */
1164  mchunkptr remainder;               /* remainder from a split */
1165  long      remainder_size;          /* its size */
1166  int       remainder_index;         /* its bin index */
1167  unsigned long block;               /* block traverser bit */
1168  int       startidx;                /* first bin of a traversed block */
1169  mchunkptr fwd;                     /* misc temp for linking */
1170  mchunkptr bck;                     /* misc temp for linking */
1171  mbinptr q;                         /* misc temp */
1172
1173  INTERNAL_SIZE_T nb  = request2size(bytes);  /* padded request size; */
1174
1175  /* Check for exact match in a bin */
1176
1177  if (is_small_request(nb))  /* Faster version for small requests */
1178  {
1179    idx = smallbin_index(nb);
1180
1181    /* No traversal or size check necessary for small bins.  */
1182
1183    q = bin_at(idx);
1184    victim = last(q);
1185
1186    /* Also scan the next one, since it would have a remainder < MINSIZE */
1187    if (victim == q)
1188    {
1189      q = next_bin(q);
1190      victim = last(q);
1191    }
1192    if (victim != q)
1193    {
1194      victim_size = chunksize(victim);
1195      unlink(victim, bck, fwd);
1196      set_inuse_bit_at_offset(victim, victim_size);
1197      check_malloced_chunk(victim, nb);
1198      return chunk2mem(victim);
1199    }
1200
1201    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
1202
1203  }
1204  else
1205  {
1206    idx = bin_index(nb);
1207    bin = bin_at(idx);
1208
1209    for (victim = last(bin); victim != bin; victim = victim->bk)
1210    {
1211      victim_size = chunksize(victim);
1212      remainder_size = victim_size - nb;
1213
1214      if (remainder_size >= (long)MINSIZE) /* too big */
1215      {
1216        --idx; /* adjust to rescan below after checking last remainder */
1217        break;
1218      }
1219
1220      else if (remainder_size >= 0) /* exact fit */
1221      {
1222        unlink(victim, bck, fwd);
1223        set_inuse_bit_at_offset(victim, victim_size);
1224        check_malloced_chunk(victim, nb);
1225        return chunk2mem(victim);
1226      }
1227    }
1228
1229    ++idx;
1230
1231  }
1232
1233  /* Try to use the last split-off remainder */
1234
1235  if ( (victim = last_remainder->fd) != last_remainder)
1236  {
1237    victim_size = chunksize(victim);
1238    remainder_size = victim_size - nb;
1239
1240    if (remainder_size >= (long)MINSIZE) /* re-split */
1241    {
1242      remainder = chunk_at_offset(victim, nb);
1243      set_head(victim, nb | PREV_INUSE);
1244      link_last_remainder(remainder);
1245      set_head(remainder, remainder_size | PREV_INUSE);
1246      set_foot(remainder, remainder_size);
1247      check_malloced_chunk(victim, nb);
1248      return chunk2mem(victim);
1249    }
1250
1251    clear_last_remainder;
1252
1253    if (remainder_size >= 0)  /* exhaust */
1254    {
1255      set_inuse_bit_at_offset(victim, victim_size);
1256      check_malloced_chunk(victim, nb);
1257      return chunk2mem(victim);
1258    }
1259
1260    /* Else place in bin */
1261
1262    frontlink(victim, victim_size, remainder_index, bck, fwd);
1263  }
1264
1265  /*
1266     If there are any possibly nonempty big-enough blocks,
1267     search for best fitting chunk by scanning bins in blockwidth units.
1268  */
1269
1270  if ( (block = idx2binblock(idx)) <= binblocks)
1271  {
1272
1273    /* Get to the first marked block */
1274
1275    if ( (block & binblocks) == 0)
1276    {
1277      /* force to an even block boundary */
1278      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
1279      block <<= 1;
1280      while ((block & binblocks) == 0)
1281      {
1282        idx += BINBLOCKWIDTH;
1283        block <<= 1;
1284      }
1285    }
1286
1287    /* For each possibly nonempty block ... */
1288    for (;;)
1289    {
1290      startidx = idx;          /* (track incomplete blocks) */
1291      q = bin = bin_at(idx);
1292
1293      /* For each bin in this block ... */
1294      do
1295      {
1296        /* Find and use first big enough chunk ... */
1297
1298        for (victim = last(bin); victim != bin; victim = victim->bk)
1299        {
1300          victim_size = chunksize(victim);
1301          remainder_size = victim_size - nb;
1302
1303          if (remainder_size >= (long)MINSIZE) /* split */
1304          {
1305            remainder = chunk_at_offset(victim, nb);
1306            set_head(victim, nb | PREV_INUSE);
1307            unlink(victim, bck, fwd);
1308            link_last_remainder(remainder);
1309            set_head(remainder, remainder_size | PREV_INUSE);
1310            set_foot(remainder, remainder_size);
1311            check_malloced_chunk(victim, nb);
1312            return chunk2mem(victim);
1313          }
1314
1315          else if (remainder_size >= 0)  /* take */
1316          {
1317            set_inuse_bit_at_offset(victim, victim_size);
1318            unlink(victim, bck, fwd);
1319            check_malloced_chunk(victim, nb);
1320            return chunk2mem(victim);
1321          }
1322
1323        }
1324
1325       bin = next_bin(bin);
1326
1327      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
1328
1329      /* Clear out the block bit. */
1330
1331      do   /* Possibly backtrack to try to clear a partial block */
1332      {
1333        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
1334        {
1335          binblocks &= ~block;
1336          break;
1337        }
1338        --startidx;
1339       q = prev_bin(q);
1340      } while (first(q) == q);
1341
1342      /* Get to the next possibly nonempty block */
1343
1344      if ( (block <<= 1) <= binblocks && (block != 0) )
1345      {
1346        while ((block & binblocks) == 0)
1347        {
1348          idx += BINBLOCKWIDTH;
1349          block <<= 1;
1350        }
1351      }
1352      else
1353        break;
1354    }
1355  }
1356
1357
1358  /* Try to use top chunk */
1359
1360  /* Require that there be a remainder, ensuring top always exists  */
1361  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1362  {
1363
1364#if HAVE_MMAP
1365    /* If big and would otherwise need to extend, try to use mmap instead */
1366    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
1367        (victim = mmap_chunk(nb)) != 0)
1368      return chunk2mem(victim);
1369#endif
1370
1371    /* Try to extend */
1372    malloc_extend_top(nb);
1373    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1374      return 0; /* propagate failure */
1375  }
1376
1377  victim = top;
1378  set_head(victim, nb | PREV_INUSE);
1379  top = chunk_at_offset(victim, nb);
1380  set_head(top, remainder_size | PREV_INUSE);
1381  check_malloced_chunk(victim, nb);
1382  return chunk2mem(victim);
1383
1384}
1385
1386
1387
1388
1389/*
1390
1391  free() algorithm :
1392
1393    cases:
1394
1395       1. free(0) has no effect.
1396
1397       2. If the chunk was allocated via mmap, it is released via munmap().
1398
1399       3. If a returned chunk borders the current high end of memory,
1400          it is consolidated into the top, and if the total unused
1401          topmost memory exceeds the trim threshold, malloc_trim is
1402          called.
1403
1404       4. Other chunks are consolidated as they arrive, and
1405          placed in corresponding bins. (This includes the case of
1406          consolidating with the current `last_remainder').
1407
1408*/
1409
1410
1411#if __STD_C
1412void fREe(Void_t* mem)
1413#else
1414void fREe(mem) Void_t* mem;
1415#endif
1416{
1417  mchunkptr p;         /* chunk corresponding to mem */
1418  INTERNAL_SIZE_T hd;  /* its head field */
1419  INTERNAL_SIZE_T sz;  /* its size */
1420  int       idx;       /* its bin index */
1421  mchunkptr next;      /* next contiguous chunk */
1422  INTERNAL_SIZE_T nextsz; /* its size */
1423  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
1424  mchunkptr bck;       /* misc temp for linking */
1425  mchunkptr fwd;       /* misc temp for linking */
1426  int       islr;      /* track whether merging with last_remainder */
1427
1428  if (mem == 0)                              /* free(0) has no effect */
1429    return;
1430
1431  p = mem2chunk(mem);
1432  hd = p->size;
1433
1434#if HAVE_MMAP
1435  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
1436  {
1437    munmap_chunk(p);
1438    return;
1439  }
1440#endif
1441
1442  check_inuse_chunk(p);
1443
1444  sz = hd & ~PREV_INUSE;
1445  next = chunk_at_offset(p, sz);
1446  nextsz = chunksize(next);
1447
1448  if (next == top)                            /* merge with top */
1449  {
1450    sz += nextsz;
1451
1452    if (!(hd & PREV_INUSE))                    /* consolidate backward */
1453    {
1454      prevsz = p->prev_size;
1455      p = chunk_at_offset(p, -prevsz);
1456      sz += prevsz;
1457      unlink(p, bck, fwd);
1458    }
1459
1460    set_head(p, sz | PREV_INUSE);
1461    top = p;
1462    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
1463      malloc_trim(top_pad);
1464    return;
1465  }
1466
1467  set_head(next, nextsz);                    /* clear inuse bit */
1468
1469  islr = 0;
1470
1471  if (!(hd & PREV_INUSE))                    /* consolidate backward */
1472  {
1473    prevsz = p->prev_size;
1474    p = chunk_at_offset(p, -prevsz);
1475    sz += prevsz;
1476
1477    if (p->fd == last_remainder)             /* keep as last_remainder */
1478      islr = 1;
1479    else
1480      unlink(p, bck, fwd);
1481  }
1482
1483  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
1484  {
1485    sz += nextsz;
1486
1487    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
1488    {
1489      islr = 1;
1490      link_last_remainder(p);
1491    }
1492    else
1493      unlink(next, bck, fwd);
1494  }
1495
1496
1497  set_head(p, sz | PREV_INUSE);
1498  set_foot(p, sz);
1499  if (!islr)
1500    frontlink(p, sz, idx, bck, fwd);
1501}
1502
1503
1504
1505
1506
1507/*
1508
1509  Realloc algorithm:
1510
1511    Chunks that were obtained via mmap cannot be extended or shrunk
1512    unless HAVE_MREMAP is defined, in which case mremap is used.
1513    Otherwise, if their reallocation is for additional space, they are
1514    copied.  If for less, they are just left alone.
1515
1516    Otherwise, if the reallocation is for additional space, and the
1517    chunk can be extended, it is, else a malloc-copy-free sequence is
1518    taken.  There are several different ways that a chunk could be
1519    extended. All are tried:
1520
1521       * Extending forward into following adjacent free chunk.
1522       * Shifting backwards, joining preceding adjacent space
1523       * Both shifting backwards and extending forward.
1524       * Extending into newly sbrked space
1525
1526    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
1527    size argument of zero (re)allocates a minimum-sized chunk.
1528
1529    If the reallocation is for less space, and the new request is for
1530    a `small' (<512 bytes) size, then the newly unused space is lopped
1531    off and freed.
1532
1533    The old unix realloc convention of allowing the last-free'd chunk
1534    to be used as an argument to realloc is no longer supported.
1535    I don't know of any programs still relying on this feature,
1536    and allowing it would also allow too many other incorrect
1537    usages of realloc to be sensible.
1538
1539
1540*/
1541
1542
1543#if __STD_C
1544Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1545#else
1546Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
1547#endif
1548{
1549  INTERNAL_SIZE_T    nb;      /* padded request size */
1550
1551  mchunkptr oldp;             /* chunk corresponding to oldmem */
1552  INTERNAL_SIZE_T    oldsize; /* its size */
1553
1554  mchunkptr newp;             /* chunk to return */
1555  INTERNAL_SIZE_T    newsize; /* its size */
1556  Void_t*   newmem;           /* corresponding user mem */
1557
1558  mchunkptr next;             /* next contiguous chunk after oldp */
1559  INTERNAL_SIZE_T  nextsize;  /* its size */
1560
1561  mchunkptr prev;             /* previous contiguous chunk before oldp */
1562  INTERNAL_SIZE_T  prevsize;  /* its size */
1563
1564  mchunkptr remainder;        /* holds split off extra space from newp */
1565  INTERNAL_SIZE_T  remainder_size;   /* its size */
1566
1567  mchunkptr bck;              /* misc temp for linking */
1568  mchunkptr fwd;              /* misc temp for linking */
1569
1570#ifdef REALLOC_ZERO_BYTES_FREES
1571  if (bytes == 0) { fREe(oldmem); return 0; }
1572#endif
1573
1574
1575  /* realloc of null is supposed to be same as malloc */
1576  if (oldmem == 0) return mALLOc(bytes);
1577
1578  newp    = oldp    = mem2chunk(oldmem);
1579  newsize = oldsize = chunksize(oldp);
1580
1581
1582  nb = request2size(bytes);
1583
1584#if HAVE_MMAP
1585  if (chunk_is_mmapped(oldp))
1586  {
1587#if HAVE_MREMAP
1588    newp = mremap_chunk(oldp, nb);
1589    if(newp) return chunk2mem(newp);
1590#endif
1591    /* Note the extra SIZE_SZ overhead. */
1592    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1593    /* Must alloc, copy, free. */
1594    newmem = mALLOc(bytes);
1595    if (newmem == 0) return 0; /* propagate failure */
1596    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
1597    munmap_chunk(oldp);
1598    return newmem;
1599  }
1600#endif
1601
1602  check_inuse_chunk(oldp);
1603
1604  if ((long)(oldsize) < (long)(nb))
1605  {
1606
1607    /* Try expanding forward */
1608
1609    next = chunk_at_offset(oldp, oldsize);
1610    if (next == top || !inuse(next))
1611    {
1612      nextsize = chunksize(next);
1613
1614      /* Forward into top only if a remainder */
1615      if (next == top)
1616      {
1617        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1618        {
1619          newsize += nextsize;
1620          top = chunk_at_offset(oldp, nb);
1621          set_head(top, (newsize - nb) | PREV_INUSE);
1622          set_head_size(oldp, nb);
1623          return chunk2mem(oldp);
1624        }
1625      }
1626
1627      /* Forward into next chunk */
1628      else if (((long)(nextsize + newsize) >= (long)(nb)))
1629      {
1630        unlink(next, bck, fwd);
1631        newsize  += nextsize;
1632        goto split;
1633      }
1634    }
1635    else
1636    {
1637      next = 0;
1638      nextsize = 0;
1639    }
1640
1641    /* Try shifting backwards. */
1642
1643    if (!prev_inuse(oldp))
1644    {
1645      prev = prev_chunk(oldp);
1646      prevsize = chunksize(prev);
1647
1648      /* try forward + backward first to save a later consolidation */
1649
1650      if (next != 0)
1651      {
1652        /* into top */
1653        if (next == top)
1654        {
1655          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1656          {
1657            unlink(prev, bck, fwd);
1658            newp = prev;
1659            newsize += prevsize + nextsize;
1660            newmem = chunk2mem(newp);
1661            MALLOC_MOVE(newmem, oldmem, oldsize - SIZE_SZ);
1662            top = chunk_at_offset(newp, nb);
1663            set_head(top, (newsize - nb) | PREV_INUSE);
1664            set_head_size(newp, nb);
1665            return newmem;
1666          }
1667        }
1668
1669        /* into next chunk */
1670        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1671        {
1672          unlink(next, bck, fwd);
1673          unlink(prev, bck, fwd);
1674          newp = prev;
1675          newsize += nextsize + prevsize;
1676          newmem = chunk2mem(newp);
1677          MALLOC_MOVE(newmem, oldmem, oldsize - SIZE_SZ);
1678          goto split;
1679        }
1680      }
1681
1682      /* backward only */
1683      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
1684      {
1685        unlink(prev, bck, fwd);
1686        newp = prev;
1687        newsize += prevsize;
1688        newmem = chunk2mem(newp);
1689        MALLOC_MOVE(newmem, oldmem, oldsize - SIZE_SZ);
1690        goto split;
1691      }
1692    }
1693
1694    /* Must allocate */
1695
1696    newmem = mALLOc (bytes);
1697
1698    if (newmem == 0)  /* propagate failure */
1699      return 0;
1700
1701    /* Avoid copy if newp is next chunk after oldp. */
1702    /* (This can only happen when new chunk is sbrk'ed.) */
1703
1704    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
1705    {
1706      newsize += chunksize(newp);
1707      newp = oldp;
1708      goto split;
1709    }
1710
1711    /* Otherwise copy, free, and exit */
1712    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1713    fREe(oldmem);
1714    return newmem;
1715  }
1716
1717
1718 split:  /* split off extra room in old or expanded chunk */
1719
1720  if (newsize - nb >= MINSIZE) /* split off remainder */
1721  {
1722    remainder = chunk_at_offset(newp, nb);
1723    remainder_size = newsize - nb;
1724    set_head_size(newp, nb);
1725    set_head(remainder, remainder_size | PREV_INUSE);
1726    set_inuse_bit_at_offset(remainder, remainder_size);
1727    fREe(chunk2mem(remainder)); /* let free() deal with it */
1728  }
1729  else
1730  {
1731    set_head_size(newp, newsize);
1732    set_inuse_bit_at_offset(newp, newsize);
1733  }
1734
1735  check_inuse_chunk(newp);
1736  return chunk2mem(newp);
1737}
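/*
  A minimal usage sketch for rEALLOc (guarded by the hypothetical
  DL_USAGE_EXAMPLES macro, so it is not part of normal builds).  It only
  relies on the mALLOc/rEALLOc/fREe routines defined in this file and on
  standard headers.
*/
#ifdef DL_USAGE_EXAMPLES
#include <string.h>

static void realloc_usage_example (void)
{
  char* p = (char*) mALLOc (64);
  char* q;
  if (p == 0) return;
  strcpy (p, "hello");

  /* grow: may extend in place (forward, backward, or into top) or
     fall back to the malloc-copy-free path described above */
  q = (char*) rEALLOc (p, 4096);
  if (q == 0) { fREe (p); return; }
  p = q;                        /* contents up to the old size are kept */

  /* shrink: for a small new size the trailing space is split off
     and handed back to the free lists */
  q = (char*) rEALLOc (p, 16);
  if (q != 0) p = q;

  /* rEALLOc (0, n) behaves like mALLOc (n); rEALLOc (p, 0) frees p only
     when REALLOC_ZERO_BYTES_FREES is defined, otherwise it returns a
     minimum-sized chunk */
  fREe (p);
}
#endif /* DL_USAGE_EXAMPLES */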
1738
1739
1740
1741
1742/*
1743
1744  memalign algorithm:
1745
1746    memalign requests more than enough space from malloc, finds a spot
1747    within that chunk that meets the alignment request, and then
1748    possibly frees the leading and trailing space.
1749
1750    The alignment argument must be a power of two. This property is not
1751    checked by memalign, so misuse may result in random runtime errors.
1752
1753    8-byte alignment is guaranteed by normal malloc calls, so don't
1754    bother calling memalign with an argument of 8 or less.
1755
1756    Overreliance on memalign is a sure way to fragment space.
1757
1758*/
1759
1760
1761#if __STD_C
1762Void_t* mEMALIGn(size_t alignment, size_t bytes)
1763#else
1764Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
1765#endif
1766{
1767  INTERNAL_SIZE_T    nb;      /* padded  request size */
1768  char*     m;                /* memory returned by malloc call */
1769  mchunkptr p;                /* corresponding chunk */
1770  char*     brk;              /* alignment point within p */
1771  mchunkptr newp;             /* chunk to return */
1772  INTERNAL_SIZE_T  newsize;   /* its size */
1773  INTERNAL_SIZE_T  leadsize;  /* leading space before alignment point */
1774  mchunkptr remainder;        /* spare room at end to split off */
1775  long      remainder_size;   /* its size */
1776
1777  /* If need less alignment than we give anyway, just relay to malloc */
1778
1779  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
1780
1781  /* Otherwise, ensure that it is at least a minimum chunk size */
1782
1783  if (alignment <  MINSIZE) alignment = MINSIZE;
1784
1785  /* Call malloc with worst case padding to hit alignment. */
1786
1787  nb = request2size(bytes);
1788  m  = (char*)(mALLOc(nb + alignment + MINSIZE));
1789
1790  if (m == 0) return 0; /* propagate failure */
1791
1792  p = mem2chunk(m);
1793
1794  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
1795  {
1796#if HAVE_MMAP
1797    if(chunk_is_mmapped(p))
1798      return chunk2mem(p); /* nothing more to do */
1799#endif
1800  }
1801  else /* misaligned */
1802  {
1803    /*
1804      Find an aligned spot inside chunk.
1805      Since we need to give back leading space in a chunk of at
1806      least MINSIZE, if the first calculation places us at
1807      a spot with less than MINSIZE leader, we can move to the
1808      next aligned spot -- we've allocated enough total room so that
1809      this is always possible.
1810    */
1811
1812    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment);
1813    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
1814
1815    newp = (mchunkptr)brk;
1816    leadsize = brk - (char*)(p);
1817    newsize = chunksize(p) - leadsize;
1818
1819#if HAVE_MMAP
1820    if(chunk_is_mmapped(p))
1821    {
1822      newp->prev_size = p->prev_size + leadsize;
1823      set_head(newp, newsize|IS_MMAPPED);
1824      return chunk2mem(newp);
1825    }
1826#endif
1827
1828    /* give back leader, use the rest */
1829
1830    set_head(newp, newsize | PREV_INUSE);
1831    set_inuse_bit_at_offset(newp, newsize);
1832    set_head_size(p, leadsize);
1833    fREe(chunk2mem(p));
1834    p = newp;
1835
1836    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
1837  }
1838
1839  /* Also give back spare room at the end */
1840
1841  remainder_size = chunksize(p) - nb;
1842
1843  if (remainder_size >= (long)MINSIZE)
1844  {
1845    remainder = chunk_at_offset(p, nb);
1846    set_head(remainder, remainder_size | PREV_INUSE);
1847    set_head_size(p, nb);
1848    fREe(chunk2mem(remainder));
1849  }
1850
1851  check_inuse_chunk(p);
1852  return chunk2mem(p);
1853
1854}
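/*
  A minimal usage sketch for mEMALIGn (guarded by the hypothetical
  DL_USAGE_EXAMPLES macro, so it is not part of normal builds).  The
  alignment must be a power of two; as noted above, this is not checked.
*/
#ifdef DL_USAGE_EXAMPLES
static void memalign_usage_example (void)
{
  /* request 100 bytes starting on a 64-byte boundary */
  Void_t* p = mEMALIGn (64, 100);
  if (p != 0)
  {
    assert ((((unsigned long) p) % 64) == 0);  /* address is 64-aligned */
    fREe (p);
  }
}
#endif /* DL_USAGE_EXAMPLES */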
1855
1856
1857
1858
1859/*
1860    valloc just invokes memalign with alignment argument equal
1861    to the page size of the system (or as near to this as can
1862    be figured out from all the includes/defines above).
1863*/
1864
1865#if __STD_C
1866Void_t* vALLOc(size_t bytes)
1867#else
1868Void_t* vALLOc(bytes) size_t bytes;
1869#endif
1870{
1871  return mEMALIGn (malloc_getpagesize, bytes);
1872}
1873
1874/*
1875  pvalloc just invokes valloc (i.e., memalign with page alignment)
1876  for the request rounded up to the nearest multiple of the page size
1877*/
1878
1879
1880#if __STD_C
1881Void_t* pvALLOc(size_t bytes)
1882#else
1883Void_t* pvALLOc(bytes) size_t bytes;
1884#endif
1885{
1886  size_t pagesize = malloc_getpagesize;
1887  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
1888}
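/*
  Worked example of the rounding above, assuming a 4096-byte page:
  pvALLOc (5000) computes (5000 + 4095) & ~4095 == 8192, so it returns a
  page-aligned block whose usable size covers two full pages.
*/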
1889
1890/*
1891
1892  calloc calls malloc, then zeroes out the allocated chunk.
1893
1894*/
1895
1896#if __STD_C
1897Void_t* cALLOc(size_t n, size_t elem_size)
1898#else
1899Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
1900#endif
1901{
1902  mchunkptr p;
1903  INTERNAL_SIZE_T csz;
1904
1905  INTERNAL_SIZE_T sz = n * elem_size;
1906
1907  /* check if expand_top called, in which case don't need to clear */
1908#if MORECORE_CLEARS
1909  mchunkptr oldtop = top;
1910  INTERNAL_SIZE_T oldtopsize = chunksize(top);
1911#endif
1912  Void_t* mem = mALLOc (sz);
1913
1914  if (mem == 0)
1915    return 0;
1916  else
1917  {
1918    p = mem2chunk(mem);
1919
1920    /* Two optional cases in which clearing not necessary */
1921
1922
1923#if HAVE_MMAP
1924    if (chunk_is_mmapped(p)) return mem;
1925#endif
1926
1927    csz = chunksize(p);
1928
1929#if MORECORE_CLEARS
1930    if (p == oldtop && csz > oldtopsize)
1931    {
1932      /* clear only the bytes from non-freshly-sbrked memory */
1933      csz = oldtopsize;
1934    }
1935#endif
1936
1937    MALLOC_ZERO(mem, csz - SIZE_SZ);
1938    return mem;
1939  }
1940}
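/*
  A minimal usage sketch for cALLOc (guarded by the hypothetical
  DL_USAGE_EXAMPLES macro).  Note that the size computation above is a
  plain n * elem_size with no overflow check, so callers must keep the
  product in range.
*/
#ifdef DL_USAGE_EXAMPLES
static void calloc_usage_example (void)
{
  /* 100 ints, all guaranteed to read as zero */
  int* a = (int*) cALLOc (100, sizeof (int));
  if (a != 0)
  {
    assert (a[0] == 0 && a[99] == 0);
    fREe (a);
  }
}
#endif /* DL_USAGE_EXAMPLES */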
1941
1942/*
1943
1944  cfree just calls free. It is needed/defined on some systems
1945  that pair it with calloc, presumably for odd historical reasons.
1946
1947*/
1948
1949#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
1950#if __STD_C
1951void cfree(Void_t *mem)
1952#else
1953void cfree(mem) Void_t *mem;
1954#endif
1955{
1956  fREe(mem);
1957}
1958#endif
1959
1960
1961
1962/*
1963
1964    Malloc_trim gives memory back to the system (via negative
1965    arguments to sbrk) if there is unused memory at the `high' end of
1966    the malloc pool. You can call this after freeing large blocks of
1967    memory to potentially reduce the system-level memory requirements
1968    of a program. However, it cannot guarantee to reduce memory. Under
1969    some allocation patterns, some large free blocks of memory will be
1970    locked between two used chunks, so they cannot be given back to
1971    the system.
1972
1973    The `pad' argument to malloc_trim represents the amount of free
1974    trailing space to leave untrimmed. If this argument is zero,
1975    only the minimum amount of memory to maintain internal data
1976    structures will be left (one page or less). Non-zero arguments
1977    can be supplied to maintain enough trailing space to service
1978    future expected allocations without having to re-obtain memory
1979    from the system.
1980
1981    Malloc_trim returns 1 if it actually released any memory, else 0.
1982
1983*/
1984
1985#if __STD_C
1986int malloc_trim(size_t pad)
1987#else
1988int malloc_trim(pad) size_t pad;
1989#endif
1990{
1991  long  top_size;        /* Amount of top-most memory */
1992  long  extra;           /* Amount to release */
1993  char* current_brk;     /* address returned by pre-check sbrk call */
1994  char* new_brk;         /* address returned by negative sbrk call */
1995
1996  unsigned long pagesz = malloc_getpagesize;
1997
1998  top_size = chunksize(top);
1999  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2000
2001  if (extra < (long)pagesz)  /* Not enough memory to release */
2002    return 0;
2003
2004  else
2005  {
2006    /* Test to make sure no one else called sbrk */
2007    current_brk = (char*)(MORECORE (0));
2008    if (current_brk != (char*)(top) + top_size)
2009      return 0;     /* Apparently we don't own memory; must fail */
2010
2011    else
2012    {
2013      new_brk = (char*)(MORECORE (-extra));
2014
2015      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
2016      {
2017        /* Try to figure out what we have */
2018        current_brk = (char*)(MORECORE (0));
2019        top_size = current_brk - (char*)top;
2020        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
2021        {
2022          sbrked_mem = current_brk - sbrk_base;
2023          set_head(top, top_size | PREV_INUSE);
2024        }
2025        check_chunk(top);
2026        return 0;
2027      }
2028
2029      else
2030      {
2031        /* Success. Adjust top accordingly. */
2032        set_head(top, (top_size - extra) | PREV_INUSE);
2033        sbrked_mem -= extra;
2034        check_chunk(top);
2035        return 1;
2036      }
2037    }
2038  }
2039}
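/*
  A minimal usage sketch for malloc_trim (guarded by the hypothetical
  DL_USAGE_EXAMPLES macro): free a large block, then ask for unused
  memory at the top of the heap to be returned to the system, keeping
  64K of slack for future requests.
*/
#ifdef DL_USAGE_EXAMPLES
static void trim_usage_example (void)
{
  Void_t* big = mALLOc (8 * 1024 * 1024);
  if (big != 0)
    fREe (big);

  /* returns 1 if any trailing space was actually released via a
     negative sbrk, else 0 */
  (void) malloc_trim (64 * 1024);
}
#endif /* DL_USAGE_EXAMPLES */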
2040
2041
2042
2043/*
2044  malloc_usable_size:
2045
2046    This routine tells you how many bytes you can actually use in an
2047    allocated chunk, which may be more than you requested (although
2048    often not). You can use this many bytes without worrying about
2049    overwriting other allocated objects. Not a particularly great
2050    programming practice, but still sometimes useful.
2051
2052*/
2053
2054#if __STD_C
2055size_t malloc_usable_size(Void_t* mem)
2056#else
2057size_t malloc_usable_size(mem) Void_t* mem;
2058#endif
2059{
2060  mchunkptr p;
2061  if (mem == 0)
2062    return 0;
2063  else
2064  {
2065    p = mem2chunk(mem);
2066    if(!chunk_is_mmapped(p))
2067    {
2068      if (!inuse(p)) return 0;
2069      check_inuse_chunk(p);
2070      return chunksize(p) - SIZE_SZ;
2071    }
2072    return chunksize(p) - 2*SIZE_SZ;
2073  }
2074}
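/*
  A minimal usage sketch for malloc_usable_size (guarded by the
  hypothetical DL_USAGE_EXAMPLES macro): the usable size is at least the
  requested size, and all of it may be written safely.
*/
#ifdef DL_USAGE_EXAMPLES
#include <string.h>
static void usable_size_example (void)
{
  char* p = (char*) mALLOc (100);
  if (p != 0)
  {
    size_t usable = malloc_usable_size (p);
    assert (usable >= 100);
    memset (p, 0xff, usable);   /* writing up to 'usable' bytes is safe */
    fREe (p);
  }
}
#endif /* DL_USAGE_EXAMPLES */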
2075
2076
2077
2078
2079/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
2080
2081void malloc_update_mallinfo()
2082{
2083  int i;
2084  mbinptr b;
2085  mchunkptr p;
2086#if DEBUG
2087  mchunkptr q;
2088#endif
2089
2090  INTERNAL_SIZE_T avail = chunksize(top);
2091  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
2092
2093  for (i = 1; i < NAV; ++i)
2094  {
2095    b = bin_at(i);
2096    for (p = last(b); p != b; p = p->bk)
2097    {
2098#if DEBUG
2099      check_free_chunk(p);
2100      for (q = next_chunk(p);
2101           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
2102           q = next_chunk(q))
2103        check_inuse_chunk(q);
2104#endif
2105      avail += chunksize(p);
2106      navail++;
2107    }
2108  }
2109
2110  current_mallinfo.ordblks = navail;
2111  current_mallinfo.uordblks = sbrked_mem - avail;
2112  current_mallinfo.fordblks = avail;
2113  current_mallinfo.hblks = n_mmaps;
2114  current_mallinfo.hblkhd = mmapped_mem;
2115  current_mallinfo.keepcost = chunksize(top);
2116
2117}
2118
2119
2120
2121/*
2122
2123  malloc_stats:
2124
2125    Prints on stderr the amount of space obtained from the system (both
2126    via sbrk and mmap), the maximum amount (which may be more than
2127    current if malloc_trim and/or munmap got called), the maximum
2128    number of simultaneous mmap regions used, and the current number
2129    of bytes allocated via malloc (or realloc, etc) but not yet
2130    freed. (Note that this is the number of bytes allocated, not the
2131    number requested. It will be larger than the number requested
2132    because of alignment and bookkeeping overhead.)
2133
2134*/
2135
2136void malloc_stats()
2137{
2138  malloc_update_mallinfo();
2139  fprintf(stderr, "max system bytes = %10u\n",
2140          (unsigned int)(max_total_mem));
2141  fprintf(stderr, "system bytes     = %10u\n",
2142          (unsigned int)(sbrked_mem + mmapped_mem));
2143  fprintf(stderr, "in use bytes     = %10u\n",
2144          (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
2145#if HAVE_MMAP
2146  fprintf(stderr, "max mmap regions = %10u\n",
2147          (unsigned int)max_n_mmaps);
2148#endif
2149}
2150
2151/*
2152  mallinfo returns a copy of the updated current_mallinfo structure.
2153*/
2154
2155struct mallinfo mALLINFo()
2156{
2157  malloc_update_mallinfo();
2158  return current_mallinfo;
2159}
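/*
  A minimal sketch of inspecting allocator state via mALLINFo and
  malloc_stats (guarded by the hypothetical DL_USAGE_EXAMPLES macro).
  It assumes the conventional int-typed mallinfo fields and that
  <stdio.h> is available; field meanings follow the assignments in
  malloc_update_mallinfo above.
*/
#ifdef DL_USAGE_EXAMPLES
#include <stdio.h>
static void mallinfo_usage_example (void)
{
  struct mallinfo mi = mALLINFo ();
  printf ("free chunks:          %d\n", mi.ordblks);
  printf ("bytes in use:         %d\n", mi.uordblks);
  printf ("bytes free:           %d\n", mi.fordblks);
  printf ("top chunk (keepcost): %d\n", mi.keepcost);
  malloc_stats ();   /* similar summary, printed to stderr */
}
#endif /* DL_USAGE_EXAMPLES */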
2160
2161
2162
2163
2164/*
2165  mallopt:
2166
2167    mallopt is the general SVID/XPG interface to tunable parameters.
2168    The format is to provide a (parameter-number, parameter-value) pair.
2169    mallopt then sets the corresponding parameter to the argument
2170    value if it can (i.e., so long as the value is meaningful),
2171    and returns 1 if successful else 0.
2172
2173    See descriptions of tunable parameters above.
2174
2175*/
2176
2177#if __STD_C
2178int mALLOPt(int param_number, int value)
2179#else
2180int mALLOPt(param_number, value) int param_number; int value;
2181#endif
2182{
2183  switch(param_number)
2184  {
2185    case M_TRIM_THRESHOLD:
2186      trim_threshold = value; return 1;
2187    case M_TOP_PAD:
2188      top_pad = value; return 1;
2189    case M_MMAP_THRESHOLD:
2190      mmap_threshold = value; return 1;
2191    case M_MMAP_MAX:
2192#if HAVE_MMAP
2193      n_mmaps_max = value; return 1;
2194#else
2195      if (value != 0) return 0; else  n_mmaps_max = value; return 1;
2196#endif
2197
2198    default:
2199      return 0;
2200  }
2201}
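/*
  A minimal usage sketch for mALLOPt (guarded by the hypothetical
  DL_USAGE_EXAMPLES macro), using the parameter numbers handled in the
  switch above.  Each call returns 1 on success and 0 for settings the
  build cannot honor (e.g. a non-zero M_MMAP_MAX without HAVE_MMAP).
*/
#ifdef DL_USAGE_EXAMPLES
static void mallopt_usage_example (void)
{
  /* keep up to 1MB of trailing free space before trimming */
  mALLOPt (M_TRIM_THRESHOLD, 1024 * 1024);

  /* pad each request for new memory from the system by 64K */
  mALLOPt (M_TOP_PAD, 64 * 1024);

  /* service requests of 256K and larger via mmap, when available */
  mALLOPt (M_MMAP_THRESHOLD, 256 * 1024);
}
#endif /* DL_USAGE_EXAMPLES */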
2202
2203/*
2204
2205History:
2206
2207    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
2208      * Fixed ordering problem with boundary-stamping
2209
2210    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
2211      * Added pvalloc, as recommended by H.J. Liu
2212      * Added 64bit pointer support mainly from Wolfram Gloger
2213      * Added anonymously donated WIN32 sbrk emulation
2214      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
2215      * malloc_extend_top: fix mask error that caused wastage after
2216        foreign sbrks
2217      * Add linux mremap support code from HJ Liu
2218
2219    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
2220      * Integrated most documentation with the code.
2221      * Add support for mmap, with help from
2222        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2223      * Use last_remainder in more cases.
2224      * Pack bins using idea from  colin@nyx10.cs.du.edu
2225      * Use ordered bins instead of best-fit threshold
2226      * Eliminate block-local decls to simplify tracing and debugging.
2227      * Support another case of realloc via move into top
2228      * Fix error occurring when initial sbrk_base not word-aligned.
2229      * Rely on page size for units instead of SBRK_UNIT to
2230        avoid surprises about sbrk alignment conventions.
2231      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
2232        (raymond@es.ele.tue.nl) for the suggestion.
2233      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
2234      * More precautions for cases where other routines call sbrk,
2235        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2236      * Added macros etc., allowing use in linux libc from
2237        H.J. Lu (hjl@gnu.ai.mit.edu)
2238      * Inverted this history list
2239
2240    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
2241      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
2242      * Removed all preallocation code since under current scheme
2243        the work required to undo bad preallocations exceeds
2244        the work saved in good cases for most test programs.
2245      * No longer use return list or unconsolidated bins since
2246        no scheme using them consistently outperforms those that don't
2247        given above changes.
2248      * Use best fit for very large chunks to prevent some worst-cases.
2249      * Added some support for debugging
2250
2251    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
2252      * Removed footers when chunks are in use. Thanks to
2253        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
2254
2255    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
2256      * Added malloc_trim, with help from Wolfram Gloger
2257        (wmglo@Dent.MED.Uni-Muenchen.DE).
2258
2259    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
2260
2261    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
2262      * realloc: try to expand in both directions
2263      * malloc: swap order of clean-bin strategy;
2264      * realloc: only conditionally expand backwards
2265      * Try not to scavenge used bins
2266      * Use bin counts as a guide to preallocation
2267      * Occasionally bin return list chunks in first scan
2268      * Add a few optimizations from colin@nyx10.cs.du.edu
2269
2270    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
2271      * faster bin computation & slightly different binning
2272      * merged all consolidations to one part of malloc proper
2273         (eliminating old malloc_find_space & malloc_clean_bin)
2274      * Scan 2 returns chunks (not just 1)
2275      * Propagate failure in realloc if malloc returns 0
2276      * Add stuff to allow compilation on non-ANSI compilers
2277          from kpv@research.att.com
2278
2279    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
2280      * removed potential for odd address access in prev_chunk
2281      * removed dependency on getpagesize.h
2282      * misc cosmetics and a bit more internal documentation
2283      * anticosmetics: mangled names in macros to evade debugger strangeness
2284      * tested on sparc, hp-700, dec-mips, rs6000
2285          with gcc & native cc (hp, dec only) allowing
2286          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
2287
2288    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
2289      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
2290         structure of old version,  but most details differ.)
2291
2292*/