source: git/omalloc/dlmalloc.h @ bee06d

/*******************************************************************
 *  File:    dlmalloc.h
 *  Purpose: declarations for dlmalloc
 *  This was obtained by cutting out the beginning of malloc.c
 *
 *  Version: $Id$
 *******************************************************************/
#ifndef DL_MALLOC_H
#define DL_MALLOC_H

#define Void_t char

/* define to -1 if you want this implementation to provide the
   malloc/calloc/realloc/free functions */
#ifndef OM_PROVIDE_MALLOC
#define OM_PROVIDE_MALLOC 0
#endif

#ifndef HAVE__USR_INCLUDE_MALLOC_H
#undef  HAVE__USR_INCLUDE_MALLOC_H
#endif

#ifdef HAVE__USR_INCLUDE_MALLOC_H
#define HAVE_USR_INCLUDE_MALLOC_H 1
#endif

#define OM_MALLOC_MALLOC   mALLOc
#define OM_MALLOC_REALLOC  rEALLOc
#define OM_MALLOC_FREE     fREe
#define OM_MALLOC_VALLOC   vALLOc
#define OM_MALLOC_VFREE(addr, size) OM_MALLOC_FREE(addr)
#define OM_MALLOC_SIZEOF_ADDR(addr) malloc_usable_size(addr)
#define cfree cFREe

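/*
  Illustrative sketch: how allocation calls can be routed through the
  macros above (the right-hand sides are the dlmalloc entry points
  declared near the end of this file):

    Void_t* p = OM_MALLOC_MALLOC(64);
    size_t  s = OM_MALLOC_SIZEOF_ADDR(p);     at least 64 if p != 0
    p = OM_MALLOC_REALLOC(p, 128);
    OM_MALLOC_FREE(p);

    Void_t* v = OM_MALLOC_VALLOC(4096);       page-aligned block
    OM_MALLOC_VFREE(v, 4096);                 expands to OM_MALLOC_FREE(v)
*/
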
/* Statistics */
extern unsigned long mmapped_mem;
extern unsigned long max_mmapped_mem;
extern unsigned long max_total_mem;
extern unsigned long max_sbrked_mem;
extern struct mallinfo current_mallinfo;
extern void malloc_update_mallinfo();

#define OM_MALLOC_UPDATE_INFO           malloc_update_mallinfo()
#define OM_MALLOC_USED_BYTES            (current_mallinfo.uordblks + mmapped_mem)
#define OM_MALLOC_AVAIL_BYTES           current_mallinfo.fordblks
#define OM_MALLOC_CURRENT_BYTES_MMAP    mmapped_mem
#define OM_MALLOC_MAX_BYTES_MMAP        max_mmapped_mem
#define OM_MALLOC_CURRENT_BYTES_SYSTEM  (current_mallinfo.arena + mmapped_mem)
#define OM_MALLOC_MAX_BYTES_SYSTEM      max_total_mem
#define OM_MALLOC_CURRENT_BYTES_SBRK    current_mallinfo.arena
#define OM_MALLOC_MAX_BYTES_SBRK        max_sbrked_mem

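/*
  Illustrative sketch: reading the aggregate figures through the macros
  above.  OM_MALLOC_UPDATE_INFO should be invoked first so that
  current_mallinfo reflects the present heap state:

    unsigned long used, avail, from_system;
    OM_MALLOC_UPDATE_INFO;
    used        = OM_MALLOC_USED_BYTES;
    avail       = OM_MALLOC_AVAIL_BYTES;
    from_system = OM_MALLOC_CURRENT_BYTES_SYSTEM;
*/
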
/* ---------- To make a malloc.h, start cutting here ------------ */

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl@cs.oswego.edu

* VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://g.oswego.edu/pub/misc/malloc.c
         Check before installing!

   Note: This version differs from 2.6.4 only by correcting a
         statement ordering error that could cause failures only
         when calls to this malloc are interposed with calls to
         other memory allocators.

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation
  below; a short usage sketch follows this list.)

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p; no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc.  Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to the nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.

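  For illustration, a minimal call sequence using the routines above
  (any of the calls may also return null on failure, which is not
  handled exhaustively here):

     Void_t* p = malloc(100);
     Void_t* q = (p != 0) ? realloc(p, 200) : 0;
     if (q != 0) p = q;
     free(p);

     Void_t* a = memalign(64, 1000);
     free(a);
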
* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has been reported
       to work reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes (8 bytes with
       an 8-byte INTERNAL_SIZE_T) holding size and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit will return a
       minimum-sized chunk.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc; a
       worked instance follows below), with two exceptions:
         1. Because requests for zero bytes allocate non-zero space,
            the worst case wastage for a request of zero bytes is 24 bytes.
         2. For requests >= mmap_threshold that are serviced via
            mmap(), the worst case wastage is 8 bytes plus the remainder
            from a system page (the minimal mmap unit); typically 4096 bytes.

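       To make the normal case concrete (4-byte pointers and size_t):
       malloc(1) consumes the 16-byte minimum chunk, i.e. 15 bytes more
       than requested, while malloc(100) consumes a 104-byte chunk
       (100 bytes plus 4 bytes overhead, already a multiple of 8), i.e.
       only 4 extra bytes.
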
* Limitations

    Here are some features that are NOT currently supported:

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.  An example invocation selecting some of these options is
    given after the list.

  __STD_C                  (default: derived from C compiler defines)
     Nonzero if using an ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  DEBUG                    (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                 (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  malloc_getpagesize        (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not strictly necessary to
     define this even if you have such a file, but doing so ensures
     consistency.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  INTERNAL_LINUX_C_LIB      (default: NOT defined)
     Defined only when compiled as part of Linux libc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  WIN32                     (default: undefined)
     Define this on MS win (95, nt) platforms to compile in sbrk emulation.
  LACKS_UNISTD_H            (default: undefined)
     Define this if your system does not have a <unistd.h>.
  MORECORE                  (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE          (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default: 1)
     True (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.


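  For example, a build that enables the assertion-based debugging checks
  and makes realloc(p, 0) behave like free(p) could be compiled roughly as

     gcc -O2 -DDEBUG -DREALLOC_ZERO_BYTES_FREES -c malloc.c

  (an illustration only; any of the options above can be selected the
  same way, either on the command line or by editing the defines below).
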
*/




/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if __STD_C
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>    /* needed for malloc_stats */


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DDEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/


/*   #define REALLOC_ZERO_BYTES_FREES */


/*
  WIN32 causes an emulation of sbrk to be compiled in;
  mmap-based options are not currently supported in WIN32.
*/

/* #define WIN32 */
#ifdef WIN32
#define MORECORE wsbrk
#define HAVE_MMAP 0
#endif


/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
void *memmove(void*, const void *, size_t);
#else
Void_t* memset();
Void_t* memcpy();
Void_t* memmove();
#endif
#endif

#if USE_MEMCPY

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#define MALLOC_MOVE(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memmove(dest, src, mcsz);                                            \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#define MALLOC_MOVE(dest,src,nbytes) MALLOC_COPY(dest,src,nbytes)

#endif


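/*
  Illustrative sketch: both variants above implement the same contract.
  They are only invoked with nbytes equal to a (2n+1)-multiple of
  sizeof(INTERNAL_SIZE_T) (see the comment preceding the first variant);
  for example, zeroing and then copying a 9-word region:

    INTERNAL_SIZE_T src[9], dst[9];
    MALLOC_ZERO((char*) src, 9 * sizeof(INTERNAL_SIZE_T));
    MALLOC_COPY((char*) dst, (char*) src, 9 * sizeof(INTERNAL_SIZE_T));
*/
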
/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to
  allocate very large blocks.  These will be returned to the
  operating system immediately after a free().
*/

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef INTERNAL_LINUX_C_LIB
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      include <sys/param.h>
#      ifdef EXEC_PAGESIZE
#        define malloc_getpagesize EXEC_PAGESIZE
#      else
#        ifdef NBPG
#          ifndef CLSIZE
#            define malloc_getpagesize NBPG
#          else
#            define malloc_getpagesize (NBPG * CLSIZE)
#          endif
#        else
#          ifdef NBPC
#            define malloc_getpagesize NBPC
#          else
#            ifdef PAGESIZE
#              define malloc_getpagesize PAGESIZE
#            else
#              define malloc_getpagesize (4096) /* just guess */
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif



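/*
  Illustrative sketch (assumes the page size is a power of two): rounding
  an arbitrary request nb up to a whole number of pages, the granularity
  in which this malloc manages memory obtained from the system:

    size_t pagesz  = malloc_getpagesize;
    size_t rounded = (nb + pagesz - 1) & ~(pagesz - 1);
*/
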
/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.

*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
#include <malloc.h>
#include <mylimits.h>
#else

/* SVID2/XPG mallinfo structure */

struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* SVID2/XPG mallopt options */

#define M_MXFAST  1    /* UNUSED in this malloc */
#define M_NLBLKS  2    /* UNUSED in this malloc */
#define M_GRAIN   3    /* UNUSED in this malloc */
#define M_KEEP    4    /* UNUSED in this malloc */

#endif

/* mallopt options that actually do something */

#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4


#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
      to keep before releasing via malloc_trim in free().

      Automatic trimming is mainly useful in long-lived programs.
      Because trimming via sbrk can be slow on some systems, and can
      sometimes be wasteful (in cases where programs immediately
      afterward allocate more large chunks), the value should be high
      enough that releasing it actually improves overall system
      performance.

      The trim threshold and the mmap control parameters (see below)
      can be traded off with one another. Trimming and mmapping are
      two different ways of releasing unused memory back to the
      system. Between these two, it is often possible to keep
      system-level demands of a long-lived program down to a bare
      minimum. For example, in one test suite of sessions measuring
      the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.

      If you are using this malloc in a long-lived program, it should
      pay to experiment with these values.  As a rough guide, you
      might set it to a value close to the average size of a process
      (program) running on your system.  Releasing this much memory
      would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
      program undergoes phases where several large chunks are
      allocated and released in ways that can reuse each other's
      storage, perhaps mixed with phases where there are no such
      chunks at all.  And in well-behaved long-lived programs,
      controlling release of large blocks via trimming versus mapping
      is usually faster.

      However, in most programs, these parameters serve mainly as
      protection against the system-level effects of carrying around
      massive amounts of unneeded memory. Since frequent calls to
      sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.

      The default trim value is high enough to cause trimming only in
      fairly extreme (by current memory consumption standards) cases.
      It must be greater than the page size to have any useful effect.
      To disable trimming completely, you can set it to
      (unsigned long)(-1).


*/


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
      retain whenever sbrk is called. It is used in two ways internally:

      * When sbrk is called to extend the top of the arena to satisfy
        a new malloc request, this much padding is added to the sbrk
        request.

      * When malloc_trim is called automatically from free(),
        it is used as the `pad' argument.

      In both cases, the actual amount of padding is rounded
      so that the end of the arena is always a system page boundary.

      The main reason for using padding is to avoid calling sbrk so
      often. Having even a small pad greatly reduces the likelihood
      that nearly every malloc request during program start-up (or
      after trimming) will invoke sbrk, which needlessly wastes
      time.

      Automatic rounding-up to page-size units is normally sufficient
      to avoid measurable overhead, so the default is 0.  However, in
      systems where sbrk is relatively slow, it can pay to increase
      this value, at the expense of carrying around more memory than
      the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

    M_MMAP_THRESHOLD is the request size threshold for using mmap()
      to service a request. Requests of at least this size that cannot
      be allocated using already-existing space will be serviced via mmap.
      (If enough normal freed space already exists it is used instead.)

      Using mmap segregates relatively large chunks of memory so that
      they can be individually obtained and released from the host
      system. A request serviced through mmap is never reused by any
      other request (at least not directly; the system may just so
      happen to remap successive requests to the same locations).

      Segregating space in this way has the benefit that mmapped space
      can ALWAYS be individually released back to the system, which
      helps keep the system level memory demands of a long-lived
      program low. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.

      However, it has the disadvantages that:

         1. The space cannot be reclaimed, consolidated, and then
            used to service later requests, as happens with normal chunks.
         2. It can lead to more wastage because of mmap page alignment
            requirements.
         3. It causes malloc performance to be more dependent on host
            system memory management support routines which may vary in
            implementation quality and may impose arbitrary
            limitations. Generally, servicing a request via normal
            malloc steps is faster than going through a system's mmap.

      Altogether, these considerations should lead you to use mmap
      only for relatively large requests.


*/



#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (64)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
      service using mmap. This parameter exists because:

         1. Some systems have a limited number of internal tables for
            use by mmap.
         2. In most systems, overreliance on mmap can degrade overall
            performance.
         3. If a program allocates many large regions, it is probably
            better off using normal sbrk-based allocation routines that
            can reclaim and reallocate normal heap memory. Using a
            small value allows transition into this mode after the
            first few allocations.

      Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
      the default value is 0, and attempts to set it to non-zero values
      in mallopt will fail.
*/




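/*
  Illustrative sketch: the four parameters above can be tuned at run time
  through mallopt(); each call returns 1 on success and 0 otherwise.  The
  values shown are arbitrary:

    mallopt(M_TRIM_THRESHOLD, 256 * 1024);
    mallopt(M_MMAP_THRESHOLD, 192 * 1024);
    mallopt(M_TOP_PAD,        32 * 1024);
    mallopt(M_MMAP_MAX,       32);
*/
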
/*

  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications.  No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.

*/


#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
  #ifndef __MACH__
    extern Void_t*     sbrk(ptrdiff_t);
  #else
    extern void    *sbrk(int);
  #endif
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */

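/*
  Illustrative sketch (hypothetical names): MORECORE can be pointed at a
  custom sbrk-style routine, defined before this header is included so
  that the #ifndef defaults above are skipped, e.g. for a stand-alone
  system that carves memory out of a fixed arena:

    static char  my_arena[1 << 20];
    static char* my_brk = my_arena;

    static Void_t* my_morecore(ptrdiff_t increment)
    {
      char* old = my_brk;
      if (increment > 0 &&
          increment > (my_arena + sizeof(my_arena)) - my_brk)
        return (Void_t*) MORECORE_FAILURE;
      my_brk += increment;
      return (Void_t*) old;
    }

    #define MORECORE        my_morecore
    #define MORECORE_CLEARS 0
*/
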
#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc          __libc_calloc
#define fREe            __libc_free
#define mALLOc          __libc_malloc
#define mEMALIGn        __libc_memalign
#define rEALLOc         __libc_realloc
#define vALLOc          __libc_valloc
#define pvALLOc         __libc_pvalloc
#define mALLINFo        __libc_mallinfo
#define mALLOPt         __libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#if OM_PROVIDE_MALLOC < 0
#define cALLOc          calloc
#define fREe            free
#define mALLOc          malloc
#define rEALLOc         realloc
#define mEMALIGn        memalign
#define vALLOc          valloc
#define pvALLOc         pvalloc
#define mALLINFo        mallinfo
#define mALLOPt         mallopt
#endif

#endif

/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats();
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
#else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif


#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/* ---------- To make a malloc.h, end cutting here ------------ */

#endif /* DL_MALLOC_H */