source: git/omalloc/dlmalloc.h @ f3398d

/*******************************************************************
 *  File:    dlmalloc.h
 *  Purpose: declarations for dlmalloc
 *  This was obtained by cutting out the beginning of malloc.c
 *
 *  Version: $Id: dlmalloc.h,v 1.5 2000-08-16 12:55:46 obachman Exp $
 *******************************************************************/
#ifndef DL_MALLOC_H
#define DL_MALLOC_H

/* Define to -1 if you want this implementation to provide the
   malloc/calloc/realloc/free functions */
#ifndef OM_PROVIDE_MALLOC
#define OM_PROVIDE_MALLOC 0
#endif

#ifndef HAVE__USR_INCLUDE_MALLOC_H
#undef  HAVE__USR_INCLUDE_MALLOC_H
#endif

/* map the configure-style HAVE__USR_INCLUDE_MALLOC_H onto the
   HAVE_USR_INCLUDE_MALLOC_H switch used by dlmalloc below */
#ifdef HAVE__USR_INCLUDE_MALLOC_H
#define HAVE_USR_INCLUDE_MALLOC_H 1
#endif

#define OM_MALLOC_MALLOC   mALLOc
#define OM_MALLOC_REALLOC  rEALLOc
#define OM_MALLOC_FREE     fREe
#define OM_MALLOC_VALLOC   vALLOc
#define OM_MALLOC_VFREE(addr, size) OM_MALLOC_FREE(addr)
#define OM_MALLOC_SIZEOF_ADDR(addr) malloc_usable_size(addr)
#define cfree cFREe

/* Statistics */
extern unsigned long mmapped_mem;
extern unsigned long max_mmapped_mem;
extern unsigned long max_total_mem;
extern unsigned long max_sbrked_mem;
extern struct mallinfo current_mallinfo;
extern void malloc_update_mallinfo();

#define OM_MALLOC_UPDATE_INFO           malloc_update_mallinfo()
#define OM_MALLOC_USED_BYTES            (current_mallinfo.uordblks + mmapped_mem)
#define OM_MALLOC_AVAIL_BYTES           current_mallinfo.fordblks
#define OM_MALLOC_CURRENT_BYTES_MMAP    mmapped_mem
#define OM_MALLOC_MAX_BYTES_MMAP        max_mmapped_mem
#define OM_MALLOC_CURRENT_BYTES_SYSTEM  (current_mallinfo.arena + mmapped_mem)
#define OM_MALLOC_MAX_BYTES_SYSTEM      max_total_mem
#define OM_MALLOC_CURRENT_BYTES_SBRK    current_mallinfo.arena
#define OM_MALLOC_MAX_BYTES_SBRK        max_sbrked_mem
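
#if 0
/* Illustrative sketch (not part of the original header): one way a client
   of the macros above could report current heap usage.  The function name
   om_PrintDlmallocStats is hypothetical. */
#include <stdio.h>

static void om_PrintDlmallocStats(void)
{
  OM_MALLOC_UPDATE_INFO;   /* refresh current_mallinfo */
  printf("used bytes   : %ld\n", (long) (OM_MALLOC_USED_BYTES));
  printf("avail bytes  : %ld\n", (long) (OM_MALLOC_AVAIL_BYTES));
  printf("mmapped bytes: %ld\n", (long) (OM_MALLOC_CURRENT_BYTES_MMAP));
  printf("system bytes : %ld\n", (long) (OM_MALLOC_CURRENT_BYTES_SYSTEM));
}
#endif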

/* ---------- To make a malloc.h, start cutting here ------------ */

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl@cs.oswego.edu

* VERSION 2.6.5  Wed Jun 17 15:55:16 1998  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://g.oswego.edu/pub/misc/malloc.c
         Check before installing!

   Note: This version differs from 2.6.4 only by correcting a
         statement ordering error that could cause failures only
         when calls to this malloc are interposed with calls to
         other memory allocators.

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.
   A short usage example follows this list.)

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc.  Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.
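
  A minimal usage example (an illustrative sketch only; error handling
  kept short):

     #include <stdlib.h>

     int main()
     {
       double* v = (double*) malloc(100 * sizeof(double));
       double* w;
       if (v == 0) return 1;
       w = (double*) realloc(v, 200 * sizeof(double));
       if (w == 0) { free(v); return 1; }
       free(w);
       return 0;
     }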

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has worked
       reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit will return a
       minimum-sized chunk.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
         1. Because requests for zero bytes allocate non-zero space,
            the worst case wastage for a request of zero bytes is 24 bytes.
         2. For requests >= mmap_threshold that are serviced via
            mmap(), the worst case wastage is 8 bytes plus the remainder
            from a system page (the minimal mmap unit); typically 4096 bytes.

* Limitations

    Here are some features that are NOT currently supported

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  __STD_C                  (default: derived from C compiler defines)
     Nonzero if using ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  DEBUG                    (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  REALLOC_ZERO_BYTES_FREES (default: NOT defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p). Otherwise, since malloc returns a unique pointer for
     malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY               (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                 (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP                 (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  malloc_getpagesize        (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do, but will ensure consistency.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  INTERNAL_LINUX_C_LIB      (default: NOT defined)
     Defined only when compiled as part of Linux libc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  WIN32                     (default: undefined)
     Define this on MS win (95, nt) platforms to compile in sbrk emulation.
  LACKS_UNISTD_H            (default: undefined)
     Define this if your system does not have a <unistd.h>.
  MORECORE                  (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE          (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS           (default 1)
     True (1) if the routine mapped to MORECORE zeroes out memory (which
     holds for sbrk).
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
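
  Example (an illustrative sketch only): selecting a few of the options
  above on the compiler command line, here using gcc:

     gcc -O2 -DHAVE_MMAP=1 -DDEFAULT_TRIM_THRESHOLD='(256 * 1024)' \
         -DDEFAULT_MMAP_THRESHOLD='(256 * 1024)' -c malloc.c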


*/




/* Preliminaries */

#ifndef __STD_C
#ifdef __STDC__
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if __STD_C
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>    /* needed for malloc_stats */


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DDEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
*/


/*   #define REALLOC_ZERO_BYTES_FREES */
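
#if 0
/* Illustrative sketch (not compiled): the behavioural difference the
   option above makes.  With REALLOC_ZERO_BYTES_FREES defined,
   realloc(p, 0) behaves like free(p); without it, it returns a
   minimum-sized chunk that must still be freed. */
static void example_realloc_zero(void)
{
  void* p = malloc(32);
  p = realloc(p, 0);
#ifdef REALLOC_ZERO_BYTES_FREES
  /* p has been released; do not use or free it again */
#else
  free(p);   /* p points to a minimum-sized chunk */
#endif
}
#endif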


/*
  WIN32 causes an emulation of sbrk to be compiled in;
  mmap-based options are not currently supported in WIN32.
*/

/* #define WIN32 */
#ifdef WIN32
#define MORECORE wsbrk
#define HAVE_MMAP 0
#endif


/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif

#if USE_MEMCPY

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
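
#if 0
/* Illustrative sketch (not compiled): as noted above, MALLOC_ZERO and
   MALLOC_COPY are only invoked with (2n+1)-multiples of INTERNAL_SIZE_T
   units (n a positive integer), e.g. three words here.  The function and
   argument names are hypothetical. */
static void example_zero_then_copy(char* dest, char* src)
{
  MALLOC_ZERO(dest, 3 * sizeof(INTERNAL_SIZE_T));
  MALLOC_COPY(dest, src, 3 * sizeof(INTERNAL_SIZE_T));
}
#endif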


/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to
  allocate very large blocks.  These will be returned to the
  operating system immediately after a free().
*/

#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#ifdef INTERNAL_LINUX_C_LIB
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      include <sys/param.h>
#      ifdef EXEC_PAGESIZE
#        define malloc_getpagesize EXEC_PAGESIZE
#      else
#        ifdef NBPG
#          ifndef CLSIZE
#            define malloc_getpagesize NBPG
#          else
#            define malloc_getpagesize (NBPG * CLSIZE)
#          endif
#        else
#          ifdef NBPC
#            define malloc_getpagesize NBPC
#          else
#            ifdef PAGESIZE
#              define malloc_getpagesize PAGESIZE
#            else
#              define malloc_getpagesize (4096) /* just guess */
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
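
#if 0
/* Illustrative sketch (not compiled): using the malloc_getpagesize macro
   determined above to round a request up to whole pages.  Assumes the
   page size is a power of two; the function name is hypothetical. */
static size_t example_round_to_pages(size_t n)
{
  size_t pagesize = malloc_getpagesize;
  return (n + pagesize - 1) & ~(pagesize - 1);
}
#endif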



/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.

*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

/* SVID2/XPG mallinfo structure */

struct mallinfo {
  int arena;    /* total space allocated from system */
  int ordblks;  /* number of non-inuse chunks */
  int smblks;   /* unused -- always zero */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* total space in mmapped regions */
  int usmblks;  /* unused -- always zero */
  int fsmblks;  /* unused -- always zero */
  int uordblks; /* total allocated space */
  int fordblks; /* total non-inuse space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* SVID2/XPG mallopt options */

#define M_MXFAST  1    /* UNUSED in this malloc */
#define M_NLBLKS  2    /* UNUSED in this malloc */
#define M_GRAIN   3    /* UNUSED in this malloc */
#define M_KEEP    4    /* UNUSED in this malloc */

#endif
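
#if 0
/* Illustrative sketch (not compiled): reading the summary statistics
   returned (by copy) by mALLINFo(), which is declared further below.
   The function name is hypothetical. */
static void example_print_mallinfo(void)
{
  struct mallinfo mi = mALLINFo();
  fprintf(stderr, "arena=%d uordblks=%d fordblks=%d hblkhd=%d keepcost=%d\n",
          mi.arena, mi.uordblks, mi.fordblks, mi.hblkhd, mi.keepcost);
}
#endif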

/* mallopt options that actually do something */

#define M_TRIM_THRESHOLD    -1
#define M_TOP_PAD           -2
#define M_MMAP_THRESHOLD    -3
#define M_MMAP_MAX          -4



#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
      to keep before releasing via malloc_trim in free().

      Automatic trimming is mainly useful in long-lived programs.
      Because trimming via sbrk can be slow on some systems, and can
      sometimes be wasteful (in cases where programs immediately
      afterward allocate more large chunks) the value should be high
      enough so that your overall system performance would improve by
      releasing this much memory.

      The trim threshold and the mmap control parameters (see below)
      can be traded off with one another. Trimming and mmapping are
      two different ways of releasing unused memory back to the
      system. Between these two, it is often possible to keep
      system-level demands of a long-lived program down to a bare
      minimum. For example, in one test suite of sessions measuring
      the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.

      If you are using this malloc in a long-lived program, it should
      pay to experiment with these values.  As a rough guide, you
      might set to a value close to the average size of a process
      (program) running on your system.  Releasing this much memory
      would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
      program undergoes phases where several large chunks are
      allocated and released in ways that can reuse each other's
      storage, perhaps mixed with phases where there are no such
      chunks at all.  And in well-behaved long-lived programs,
      controlling release of large blocks via trimming versus mapping
      is usually faster.

      However, in most programs, these parameters serve mainly as
      protection against the system-level effects of carrying around
      massive amounts of unneeded memory. Since frequent calls to
      sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.

      The default trim value is high enough to cause trimming only in
      fairly extreme (by current memory consumption standards) cases.
      It must be greater than page size to have any useful effect.  To
      disable trimming completely, you can set to (unsigned long)(-1).


*/
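
#if 0
/* Illustrative sketch (not compiled): changing the trim threshold at run
   time via mallopt(), which is declared further below and returns 1 on
   success.  The value used here is an arbitrary example. */
static void example_tune_trimming(void)
{
  if (!mALLOPt(M_TRIM_THRESHOLD, 256 * 1024))
    fprintf(stderr, "could not change M_TRIM_THRESHOLD\n");
}
#endif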


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
      retain whenever sbrk is called. It is used in two ways internally:

      * When sbrk is called to extend the top of the arena to satisfy
        a new malloc request, this much padding is added to the sbrk
        request.

      * When malloc_trim is called automatically from free(),
        it is used as the `pad' argument.

      In both cases, the actual amount of padding is rounded
      so that the end of the arena is always a system page boundary.

      The main reason for using padding is to avoid calling sbrk so
      often. Having even a small pad greatly reduces the likelihood
      that nearly every malloc request during program start-up (or
      after trimming) will invoke sbrk, which needlessly wastes
      time.

      Automatic rounding-up to page-size units is normally sufficient
      to avoid measurable overhead, so the default is 0.  However, in
      systems where sbrk is relatively slow, it can pay to increase
      this value, at the expense of carrying around more memory than
      the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

    M_MMAP_THRESHOLD is the request size threshold for using mmap()
      to service a request. Requests of at least this size that cannot
      be allocated using already-existing space will be serviced via mmap.
      (If enough normal freed space already exists it is used instead.)

      Using mmap segregates relatively large chunks of memory so that
      they can be individually obtained and released from the host
      system. A request serviced through mmap is never reused by any
      other request (at least not directly; the system may just so
      happen to remap successive requests to the same locations).

      Segregating space in this way has the benefit that mmapped space
      can ALWAYS be individually released back to the system, which
      helps keep the system level memory demands of a long-lived
      program low. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.

      However, it has the disadvantages that:

         1. The space cannot be reclaimed, consolidated, and then
            used to service later requests, as happens with normal chunks.
         2. It can lead to more wastage because of mmap page alignment
            requirements
         3. It causes malloc performance to be more dependent on host
            system memory management support routines which may vary in
            implementation quality and may impose arbitrary
            limitations. Generally, servicing a request via normal
            malloc steps is faster than going through a system's mmap.

      All together, these considerations should lead you to use mmap
      only for relatively large requests.


*/



#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (64)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
      service using mmap. This parameter exists because:

         1. Some systems have a limited number of internal tables for
            use by mmap.
         2. In most systems, overreliance on mmap can degrade overall
            performance.
         3. If a program allocates many large regions, it is probably
            better off using normal sbrk-based allocation routines that
            can reclaim and reallocate normal heap memory. Using a
            small value allows transition into this mode after the
            first few allocations.

      Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
      the default value is 0, and attempts to set it to non-zero values
      in mallopt will fail.
*/
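
#if 0
/* Illustrative sketch (not compiled): steering large requests towards or
   away from mmap via mallopt(), declared further below.  The threshold
   value is an arbitrary example; setting M_MMAP_MAX to 0 disables all
   use of mmap, as described above. */
static void example_tune_mmap(void)
{
  mALLOPt(M_MMAP_THRESHOLD, 512 * 1024);
  mALLOPt(M_MMAP_MAX, 0);
}
#endif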




/*

  Special defines for linux libc

  Except when compiled using these special defines for Linux libc
  using weak aliases, this malloc is NOT designed to work in
  multithreaded applications.  No semaphores or other concurrency
  control are provided to ensure that multiple malloc or free calls
  don't run at the same time, which could be disastrous. A single
  semaphore could be used across malloc, realloc, and free (which is
  essentially the effect of the linux weak alias approach). It would
  be hard to obtain finer granularity.

*/
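
#if 0
/* Illustrative sketch (not part of this header): serializing all calls
   with a single lock, roughly the effect described above (a pthread
   mutex is used here as a stand-in for the semaphore the text mentions).
   mALLOc/fREe are declared later in this file; the wrapper names are
   hypothetical. */
#include <pthread.h>

static pthread_mutex_t om_malloc_lock = PTHREAD_MUTEX_INITIALIZER;

static Void_t* locked_malloc(size_t n)
{
  Void_t* p;
  pthread_mutex_lock(&om_malloc_lock);
  p = mALLOc(n);
  pthread_mutex_unlock(&om_malloc_lock);
  return p;
}

static void locked_free(Void_t* p)
{
  pthread_mutex_lock(&om_malloc_lock);
  fREe(p);
  pthread_mutex_unlock(&om_malloc_lock);
}
#endif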


#ifdef INTERNAL_LINUX_C_LIB

#if __STD_C

Void_t * __default_morecore_init (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore_init;

#else

Void_t * __default_morecore_init ();
Void_t *(*__morecore)() = __default_morecore_init;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0
#define MORECORE_CLEARS 1

#else /* INTERNAL_LINUX_C_LIB */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* INTERNAL_LINUX_C_LIB */
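
#if 0
/* Illustrative sketch (not compiled): supplying a custom core allocator
   by defining MORECORE and friends before the #ifndef defaults above are
   reached (e.g. on the compiler command line or before this header is
   included).  my_sbrk is a hypothetical function with an sbrk-like
   contract: it extends the heap by `increment' bytes and returns the old
   break, or (Void_t*)MORECORE_FAILURE on failure. */
extern Void_t* my_sbrk(ptrdiff_t increment);
#define MORECORE         my_sbrk
#define MORECORE_FAILURE -1
#define MORECORE_CLEARS  0   /* my_sbrk does not zero the memory it returns */
#endif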

#if defined(INTERNAL_LINUX_C_LIB) && defined(__ELF__)

#define cALLOc          __libc_calloc
#define fREe            __libc_free
#define mALLOc          __libc_malloc
#define mEMALIGn        __libc_memalign
#define rEALLOc         __libc_realloc
#define vALLOc          __libc_valloc
#define pvALLOc         __libc_pvalloc
#define mALLINFo        __libc_mallinfo
#define mALLOPt         __libc_mallopt

#pragma weak calloc = __libc_calloc
#pragma weak free = __libc_free
#pragma weak cfree = __libc_free
#pragma weak malloc = __libc_malloc
#pragma weak memalign = __libc_memalign
#pragma weak realloc = __libc_realloc
#pragma weak valloc = __libc_valloc
#pragma weak pvalloc = __libc_pvalloc
#pragma weak mallinfo = __libc_mallinfo
#pragma weak mallopt = __libc_mallopt

#else

#if OM_PROVIDE_MALLOC < 0
#define cALLOc          calloc
#define fREe            free
#define mALLOc          malloc
#define rEALLOc         realloc
#define mEMALIGn        memalign
#define vALLOc          valloc
#define pvALLOc         pvalloc
#define mALLINFo        mallinfo
#define mALLOPt         mallopt
#endif

#endif

/* Public routines */

#if __STD_C

Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     malloc_trim(size_t);
size_t  malloc_usable_size(Void_t*);
void    malloc_stats();
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
#else
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     malloc_trim();
size_t  malloc_usable_size();
void    malloc_stats();
int     mALLOPt();
struct mallinfo mALLINFo();
#endif


#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/* ---------- To make a malloc.h, end cutting here ------------ */

#endif /* DL_MALLOC_H */