source: git/Singular/gmalloc.c @ 136879

1/****************************************
2*  Computer Algebra System SINGULAR     *
3****************************************/
4/* $Id: */
5
6/* gmalloc, used by Singular to provide a trusted malloc and valloc;
7   slightly edited to include mod2.h and to provide its functionality
8   only if HAVE_GMALLOC is defined
9*/
10
11#ifdef HAVE_CONFIG_H
12#include "mod2.h"
13#endif
14
15/* #ifdef HAVE_GMALLOC */
16#if 1
17
18
19#define _MALLOC_INTERNAL
20
21/* The malloc headers and source files from the C library follow here.  */
22
23/* Declarations for `malloc' and friends.
24   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
25                  Written May 1989 by Mike Haertel.
26
27This library is free software; you can redistribute it and/or
28modify it under the terms of the GNU Library General Public License as
29published by the Free Software Foundation; either version 2 of the
30License, or (at your option) any later version.
31
32This library is distributed in the hope that it will be useful,
33but WITHOUT ANY WARRANTY; without even the implied warranty of
34MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
35Library General Public License for more details.
36
37You should have received a copy of the GNU Library General Public
38License along with this library; see the file COPYING.LIB.  If
39not, write to the Free Software Foundation, Inc., 675 Mass Ave,
40Cambridge, MA 02139, USA.
41
42   The author may be reached (Email) at the address mike@ai.mit.edu,
43   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
44
45#ifndef _MALLOC_H
46
47#define _MALLOC_H       1
48
49#ifdef _MALLOC_INTERNAL
50
51#if     defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
52#include <string.h>
53#else
54#ifndef memset
55#define memset(s, zero, n)      bzero ((s), (n))
56#endif
57#ifndef memcpy
58#define memcpy(d, s, n)         bcopy ((s), (d), (n))
59#endif
60#endif
61
62#if     defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
63#include <limits.h>
64#else
65#ifndef CHAR_BIT
66#define CHAR_BIT        8
67#endif
68#endif
69
70#ifdef  HAVE_UNISTD_H
71#include <unistd.h>
72#endif
73
74#endif  /* _MALLOC_INTERNAL.  */
75
76
77#ifdef  __cplusplus
78extern "C"
79{
80#endif
81
82#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
83#undef  __P
84#define __P(args)       args
85#undef  __ptr_t
86#define __ptr_t         void *
87#else /* Not C++ or ANSI C.  */
88#undef  __P
89#define __P(args)       ()
90#undef  const
91#define const
92#undef  __ptr_t
93#define __ptr_t         char *
94#endif /* C++ or ANSI C.  */
95
96#if defined (__STDC__) && __STDC__
97#include <stddef.h>
98#define __malloc_size_t         size_t
99#define __malloc_ptrdiff_t      ptrdiff_t
100#else
101#define __malloc_size_t         unsigned int
102#define __malloc_ptrdiff_t      int
103#endif
104
105#ifndef NULL
106#define NULL    0
107#endif
108
109
110/* Allocate SIZE bytes of memory.  */
111extern __ptr_t malloc __P ((__malloc_size_t __size));
112/* Re-allocate the previously allocated block pointed to by __PTR,
113   making the new block SIZE bytes long.  */
114extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
115/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
116extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
117/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
118extern void free __P ((__ptr_t __ptr));
119
120/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
121extern __ptr_t memalign __P ((__malloc_size_t __alignment,
122                              __malloc_size_t __size));
123
124/* Allocate SIZE bytes on a page boundary.  */
125extern __ptr_t valloc __P ((__malloc_size_t __size));
126
127
128#ifdef _MALLOC_INTERNAL
129
130/* The allocator divides the heap into blocks of fixed size; large
131   requests receive one or more whole blocks, and small requests
132   receive a fragment of a block.  Fragment sizes are powers of two,
133   and all fragments of a block are the same size.  When all the
134   fragments in a block have been freed, the block itself is freed.  */
135#define INT_BIT         (CHAR_BIT * sizeof(int))
136#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
137#define BLOCKSIZE       (1 << BLOCKLOG)
138#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
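/* Worked example (not part of the original source): on a host where int has
   32 bits, INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096 bytes.
   BLOCKIFY then rounds a request up to whole blocks:
   BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, BLOCKIFY (4097) == 2,
   BLOCKIFY (10000) == 3.  */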
139
140/* Determine the amount of memory spanned by the initial heap table
141   (not an absolute limit).  */
142#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)
143
144/* Number of contiguous free blocks allowed to build up at the end of
145   memory before they will be returned to the system.  */
146#define FINAL_FREE_BLOCKS       8
147
148/* Data structure giving per-block information.  */
149typedef union
150  {
151    /* Heap information for a busy block.  */
152    struct
153      {
154        /* Zero for a large (multiblock) object, or positive giving the
155           logarithm to the base two of the fragment size.  */
156        int type;
157        union
158          {
159            struct
160              {
161                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
162                __malloc_size_t first; /* First free fragment of the block.  */
163              } frag;
164            /* For a large object, in its first block, this has the number
165               of blocks in the object.  In the other blocks, this has a
166               negative number which says how far back the first block is.  */
167            __malloc_ptrdiff_t size;
168          } info;
169      } busy;
170    /* Heap information for a free block
171       (that may be the first of a free cluster).  */
172    struct
173      {
174        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
175        __malloc_size_t next;   /* Index of next free cluster.  */
176        __malloc_size_t prev;   /* Index of previous free cluster.  */
177      } free;
178  } malloc_info;
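/* Illustration (not part of the original source): a block carved into
   32-byte fragments has busy.type == 5 (1 << 5 == 32) and, with
   BLOCKSIZE == 4096, holds 128 fragments whose free count is kept in
   busy.info.frag.nfree.  A large object spanning three blocks has
   busy.type == 0 and busy.info.size == 3 in its first block, while the
   two following blocks carry busy.info.size == -1 and -2, pointing back
   to the first.  */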
179
180/* Pointer to first block of the heap.  */
181extern char *_heapbase;
182
183/* Table indexed by block number giving per-block information.  */
184extern malloc_info *_heapinfo;
185
186/* Address to block number and vice versa.  */
187#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
188#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
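/* Illustration (not part of the original source): blocks are numbered from 1,
   so the first BLOCKSIZE bytes above _heapbase form block 1, the next
   BLOCKSIZE bytes block 2, and so on.  The two macros are inverses: with
   BLOCKSIZE == 4096, BLOCK (_heapbase + 5000) == 2 and
   ADDRESS (2) == _heapbase + 4096.  */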
189
190/* Current search index for the heap table.  */
191extern __malloc_size_t _heapindex;
192
193/* Limit of valid info table indices.  */
194extern __malloc_size_t _heaplimit;
195
196/* Doubly linked lists of free fragments.  */
197struct list
198  {
199    struct list *next;
200    struct list *prev;
201  };
202
203/* Free list headers for each fragment size.  */
204extern struct list _fraghead[];
205
206/* List of blocks allocated with `memalign' (or `valloc').  */
207struct alignlist
208  {
209    struct alignlist *next;
210    __ptr_t aligned;            /* The address that memalign returned.  */
211    __ptr_t exact;              /* The address that malloc returned.  */
212  };
213extern struct alignlist *_aligned_blocks;
214
215/* Instrumentation.  */
216extern __malloc_size_t _chunks_used;
217extern __malloc_size_t _bytes_used;
218extern __malloc_size_t _chunks_free;
219extern __malloc_size_t _bytes_free;
220
221/* Internal version of `free' used in `morecore' (malloc.c). */
222extern void _free_internal __P ((__ptr_t __ptr));
223
224#endif /* _MALLOC_INTERNAL.  */
225
226/* Given an address in the middle of a malloc'd object,
227   return the address of the beginning of the object.  */
228extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
229
230/* Underlying allocation function; successive calls should
231   return contiguous pieces of memory.  */
232extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
233
234/* Default value of `__morecore'.  */
235extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
236
237/* If not NULL, this function is called after each time
238   `__morecore' is called to increase the data size.  */
239extern void (*__after_morecore_hook) __P ((void));
240
241/* Nonzero if `malloc' has been called and done its initialization.  */
242extern int __malloc_initialized;
243
244/* Hooks for debugging versions.  */
245extern void (*__malloc_initialize_hook) __P ((void));
246extern void (*__free_hook) __P ((__ptr_t __ptr));
247extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
248extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
249extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
250                                        __malloc_size_t __alignment));
251
252/* Return values for `mprobe': these are the kinds of inconsistencies that
253   `mcheck' enables detection of.  */
254enum mcheck_status
255  {
256    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
257    MCHECK_OK,                  /* Block is fine.  */
258    MCHECK_FREE,                /* Block freed twice.  */
259    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
260    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
261  };
262
263/* Activate a standard collection of debugging hooks.  This must be called
264   before `malloc' is ever called.  ABORTFUNC is called with an error code
265   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
266   null, the standard function prints on stderr and then calls `abort'.  */
267extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
268
269/* Check for aberrations in a particular malloc'd block.  You must have
270   called `mcheck' already.  These are the same checks that `mcheck' does
271   when you free or reallocate a block.  */
272extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
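#if 0
/* Usage sketch, not part of the original source: mcheck and mprobe are only
   declared here; their implementation lives in a separate mcheck.c, so this
   works only when that file is linked in.  mcheck must be called before the
   first malloc.  */
#include <stdio.h>
int
main ()
{
  char *p;
  mcheck (NULL);           /* NULL: report on stderr and abort on errors.  */
  p = (char *) malloc (16);
  printf ("mprobe says %d\n", (int) mprobe ((__ptr_t) p));   /* MCHECK_OK */
  free (p);
  return 0;
}
#endif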
273
274/* Activate a standard collection of tracing hooks.  */
275extern void mtrace __P ((void));
276extern void muntrace __P ((void));
277
278/* Statistics available to the user.  */
279struct mstats
280  {
281    __malloc_size_t bytes_total; /* Total size of the heap. */
282    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
283    __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
284    __malloc_size_t chunks_free; /* Chunks in the free list. */
285    __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
286  };
287
288/* Pick up the current statistics. */
289extern struct mstats mstats __P ((void));
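#if 0
/* Usage sketch, not part of the original source: mstats itself is defined in
   a separate mstats.c; when that file is linked in, the heap counters kept by
   this allocator can be reported like this.  */
#include <stdio.h>
void
print_heap_stats ()
{
  struct mstats s = mstats ();
  printf ("heap: %u bytes total, %u used in %u chunks, %u free in %u chunks\n",
          (unsigned) s.bytes_total, (unsigned) s.bytes_used,
          (unsigned) s.chunks_used, (unsigned) s.bytes_free,
          (unsigned) s.chunks_free);
}
#endif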
290
291/* Call WARNFUN with a warning message when memory usage is high.  */
292extern void memory_warnings __P ((__ptr_t __start,
293                                  void (*__warnfun) __P ((const char *))));
294
295
296/* Relocating allocator.  */
297
298/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
299extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
300
301/* Free the storage allocated in HANDLEPTR.  */
302extern void r_alloc_free __P ((__ptr_t *__handleptr));
303
304/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
305extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
306
307
308#ifdef  __cplusplus
309}
310#endif
311
312#endif /* malloc.h  */
313/* Allocate memory on a page boundary.
314   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
315
316This library is free software; you can redistribute it and/or
317modify it under the terms of the GNU Library General Public License as
318published by the Free Software Foundation; either version 2 of the
319License, or (at your option) any later version.
320
321This library is distributed in the hope that it will be useful,
322but WITHOUT ANY WARRANTY; without even the implied warranty of
323MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
324Library General Public License for more details.
325
326You should have received a copy of the GNU Library General Public
327License along with this library; see the file COPYING.LIB.  If
328not, write to the Free Software Foundation, Inc., 675 Mass Ave,
329Cambridge, MA 02139, USA.
330
331   The author may be reached (Email) at the address mike@ai.mit.edu,
332   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
333
334#if defined (__GNU_LIBRARY__) || defined (_LIBC)
335#include <stddef.h>
336#include <sys/cdefs.h>
337/* obachman: no declaration: conflicts with gnulibc6 unistd.h */
338/* extern size_t __getpagesize __P ((void)); */
339#else
340#if 0 /* obachman: pasted in getpagesize.h manually */
341#include "getpagesize.h"
342#else
343
344#ifdef VMS
345#define getpagesize() 512
346#endif
347
348#ifdef HAVE_UNISTD_H
349#include <unistd.h>
350#endif
351
352#ifdef _SC_PAGESIZE
353#define getpagesize() sysconf(_SC_PAGESIZE)
354#else
355
356#include <sys/param.h>
357
358#ifdef EXEC_PAGESIZE
359#define getpagesize() EXEC_PAGESIZE
360#else
361#ifdef NBPG
362#define getpagesize() NBPG * CLSIZE
363#ifndef CLSIZE
364#define CLSIZE 1
365#endif /* no CLSIZE */
366#else /* no NBPG */
367#ifdef NBPC
368#define getpagesize() NBPC
369#else /* no NBPC */
370#ifdef PAGESIZE
371#define getpagesize() PAGESIZE
372#endif
373#endif /* NBPC */
374#endif /* no NBPG */
375#endif /* no EXEC_PAGESIZE */
376#endif /* no _SC_PAGESIZE */
377
378/* obachman: undef , gnulibc6 conflict with unistd.h */
379#define  __getpagesize()        getpagesize()
380#endif /* if 0 */
381#endif
382
383#ifndef _MALLOC_INTERNAL
384#define _MALLOC_INTERNAL
385#include <malloc.h>
386#endif
387
388static __malloc_size_t pagesize;
389
390__ptr_t
391valloc (size)
392     __malloc_size_t size;
393{
394  if (pagesize == 0)
395/* obachman: use getpagesize, instead
396    pagesize = __getpagesize ();
397*/
398    pagesize = getpagesize ();
399
400  return memalign (pagesize, size);
401}
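#if 0
/* Usage sketch, not part of the original source: valloc simply forwards to
   memalign with the system page size, so the returned address is a multiple
   of getpagesize ().  */
#include <stdio.h>
void
valloc_demo ()
{
  __ptr_t p = valloc (100);
  if (p != NULL)
    printf ("%p, page aligned: %d\n", p,
            (int) ((unsigned long) (char *) p % getpagesize () == 0));
  free (p);
}
#endif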
402/* Memory allocator `malloc'.
403   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
404                  Written May 1989 by Mike Haertel.
405
406This library is free software; you can redistribute it and/or
407modify it under the terms of the GNU Library General Public License as
408published by the Free Software Foundation; either version 2 of the
409License, or (at your option) any later version.
410
411This library is distributed in the hope that it will be useful,
412but WITHOUT ANY WARRANTY; without even the implied warranty of
413MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
414Library General Public License for more details.
415
416You should have received a copy of the GNU Library General Public
417License along with this library; see the file COPYING.LIB.  If
418not, write to the Free Software Foundation, Inc., 675 Mass Ave,
419Cambridge, MA 02139, USA.
420
421   The author may be reached (Email) at the address mike@ai.mit.edu,
422   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
423
424#ifndef _MALLOC_INTERNAL
425#define _MALLOC_INTERNAL
426#include <malloc.h>
427#endif
428
429/* How to really get more memory.  */
430__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
431
432/* Debugging hook for `malloc'.  */
433__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
434
435/* Pointer to the base of the first block.  */
436char *_heapbase;
437
438/* Block information table.  Allocated with align/__free (not malloc/free).  */
439malloc_info *_heapinfo;
440
441/* Number of info entries.  */
442static __malloc_size_t heapsize;
443
444/* Search index in the info table.  */
445__malloc_size_t _heapindex;
446
447/* Limit of valid info table indices.  */
448__malloc_size_t _heaplimit;
449
450/* Free lists for each fragment size.  */
451struct list _fraghead[BLOCKLOG];
452
453/* Instrumentation.  */
454__malloc_size_t _chunks_used;
455__malloc_size_t _bytes_used;
456__malloc_size_t _chunks_free;
457__malloc_size_t _bytes_free;
458
459/* Are you experienced?  */
460int __malloc_initialized;
461
462void (*__malloc_initialize_hook) __P ((void));
463void (*__after_morecore_hook) __P ((void));
464
465/* Aligned allocation.  */
466static __ptr_t align __P ((__malloc_size_t));
467static __ptr_t
468align (size)
469     __malloc_size_t size;
470{
471  __ptr_t result;
472  unsigned long int adj;
473
474  result = (*__morecore) (size);
475  adj = (unsigned long int) ((unsigned long int) ((char *) result -
476                                                  (char *) NULL)) % BLOCKSIZE;
477  if (adj != 0)
478    {
479      adj = BLOCKSIZE - adj;
480      (void) (*__morecore) (adj);
481      result = (char *) result + adj;
482    }
483
484  if (__after_morecore_hook)
485    (*__after_morecore_hook) ();
486
487  return result;
488}
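/* Worked example (not part of the original source): with BLOCKSIZE == 4096,
   if __morecore returns core starting 256 bytes past a block boundary, adj is
   first 256 and then BLOCKSIZE - 256 == 3840; align obtains 3840 further
   bytes and returns the original address plus 3840, which lies on a block
   boundary.  */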
489
490/* Set everything up and remember that we have.  */
491static int initialize __P ((void));
492static int
493initialize ()
494{
495  if (__malloc_initialize_hook)
496    (*__malloc_initialize_hook) ();
497
498  heapsize = HEAP / BLOCKSIZE;
499  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
500  if (_heapinfo == NULL)
501    return 0;
502  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
503  _heapinfo[0].free.size = 0;
504  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
505  _heapindex = 0;
506  _heapbase = (char *) _heapinfo;
507
508  /* Account for the _heapinfo block itself in the statistics.  */
509  _bytes_used = heapsize * sizeof (malloc_info);
510  _chunks_used = 1;
511
512  __malloc_initialized = 1;
513  return 1;
514}
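/* Worked example (not part of the original source): with HEAP == 4194304 and
   BLOCKSIZE == 4096 the initial table has heapsize == 1024 entries.  On a
   typical 32-bit host sizeof (malloc_info) is 12, so the table itself takes
   about 12 KB (three blocks), which is what the initial _bytes_used and
   _chunks_used account for.  */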
515
516/* Get neatly aligned memory, initializing or
517   growing the heap info table as necessary. */
518static __ptr_t morecore __P ((__malloc_size_t));
519static __ptr_t
520morecore (size)
521     __malloc_size_t size;
522{
523  __ptr_t result;
524  malloc_info *newinfo, *oldinfo;
525  __malloc_size_t newsize;
526
527  result = align (size);
528  if (result == NULL)
529    return NULL;
530
531  /* Check if we need to grow the info table.  */
532  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
533    {
534      newsize = heapsize;
535      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
536        newsize *= 2;
537      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
538      if (newinfo == NULL)
539        {
540          (*__morecore) (-size);
541          return NULL;
542        }
543      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
544      memset (&newinfo[heapsize], 0,
545              (newsize - heapsize) * sizeof (malloc_info));
546      oldinfo = _heapinfo;
547      newinfo[BLOCK (oldinfo)].busy.type = 0;
548      newinfo[BLOCK (oldinfo)].busy.info.size
549        = BLOCKIFY (heapsize * sizeof (malloc_info));
550      _heapinfo = newinfo;
551      /* Account for the _heapinfo block itself in the statistics.  */
552      _bytes_used += newsize * sizeof (malloc_info);
553      ++_chunks_used;
554      _free_internal (oldinfo);
555      heapsize = newsize;
556    }
557
558  _heaplimit = BLOCK ((char *) result + size);
559  return result;
560}
561
562/* Allocate memory from the heap.  */
563__ptr_t
564malloc (size)
565     __malloc_size_t size;
566{
567  __ptr_t result;
568  __malloc_size_t block, blocks, lastblocks, start;
569  register __malloc_size_t i;
570  struct list *next;
571
572  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
573     valid address you can realloc and free (though not dereference).
574
575     It turns out that some extant code (sunrpc, at least Ultrix's version)
576     expects `malloc (0)' to return non-NULL and breaks otherwise.
577     Be compatible.  */
578
579#if     0
580  if (size == 0)
581    return NULL;
582#endif
583
584  if (__malloc_hook != NULL)
585    return (*__malloc_hook) (size);
586
587  if (!__malloc_initialized)
588    if (!initialize ())
589      return NULL;
590
591  if (size < sizeof (struct list))
592    size = sizeof (struct list);
593
594#ifdef SUNOS_LOCALTIME_BUG
595  if (size < 16)
596    size = 16;
597#endif
598
599  /* Determine the allocation policy based on the request size.  */
600  if (size <= BLOCKSIZE / 2)
601    {
602      /* Small allocation to receive a fragment of a block.
603         Determine the logarithm to base two of the fragment size. */
604      register __malloc_size_t log = 1;
605      --size;
606      while ((size /= 2) != 0)
607        ++log;
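      /* Worked example (not part of the original source): a request for 100
         bytes is decremented to 99 above, the loop then leaves log == 7, and
         the request is served from a 1 << 7 == 128 byte fragment.  */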
608
609      /* Look in the fragment lists for a
610         free fragment of the desired size. */
611      next = _fraghead[log].next;
612      if (next != NULL)
613        {
614          /* There are free fragments of this size.
615             Pop a fragment out of the fragment list and return it.
616             Update the block's nfree and first counters. */
617          result = (__ptr_t) next;
618          next->prev->next = next->next;
619          if (next->next != NULL)
620            next->next->prev = next->prev;
621          block = BLOCK (result);
622          if (--_heapinfo[block].busy.info.frag.nfree != 0)
623            _heapinfo[block].busy.info.frag.first = (unsigned long int)
624              ((unsigned long int) ((char *) next->next - (char *) NULL)
625               % BLOCKSIZE) >> log;
626
627          /* Update the statistics.  */
628          ++_chunks_used;
629          _bytes_used += 1 << log;
630          --_chunks_free;
631          _bytes_free -= 1 << log;
632        }
633      else
634        {
635          /* No free fragments of the desired size, so get a new block
636             and break it into fragments, returning the first.  */
637          result = malloc (BLOCKSIZE);
638          if (result == NULL)
639            return NULL;
640
641          /* Link all fragments but the first into the free list.  */
642          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
643            {
644              next = (struct list *) ((char *) result + (i << log));
645              next->next = _fraghead[log].next;
646              next->prev = &_fraghead[log];
647              next->prev->next = next;
648              if (next->next != NULL)
649                next->next->prev = next;
650            }
651
652          /* Initialize the nfree and first counters for this block.  */
653          block = BLOCK (result);
654          _heapinfo[block].busy.type = log;
655          _heapinfo[block].busy.info.frag.nfree = i - 1;
656          _heapinfo[block].busy.info.frag.first = i - 1;
657
658          _chunks_free += (BLOCKSIZE >> log) - 1;
659          _bytes_free += BLOCKSIZE - (1 << log);
660          _bytes_used -= BLOCKSIZE - (1 << log);
661        }
662    }
663  else
664    {
665      /* Large allocation to receive one or more blocks.
666         Search the free list in a circle starting at the last place visited.
667         If we loop completely around without finding a large enough
668         space we will have to get more memory from the system.  */
669      blocks = BLOCKIFY (size);
670      start = block = _heapindex;
671      while (_heapinfo[block].free.size < blocks)
672        {
673          block = _heapinfo[block].free.next;
674          if (block == start)
675            {
676              /* Need to get more from the system.  Check to see if
677                 the new core will be contiguous with the final free
678                 block; if so we don't need to get as much.  */
679              block = _heapinfo[0].free.prev;
680              lastblocks = _heapinfo[block].free.size;
681              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
682                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
683                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
684                {
685                  /* Which block we are extending (the `final free
686                     block' referred to above) might have changed, if
687                     it got combined with a freed info table.  */
688                  block = _heapinfo[0].free.prev;
689                  _heapinfo[block].free.size += (blocks - lastblocks);
690                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
691                  continue;
692                }
693              result = morecore (blocks * BLOCKSIZE);
694              if (result == NULL)
695                return NULL;
696              block = BLOCK (result);
697              _heapinfo[block].busy.type = 0;
698              _heapinfo[block].busy.info.size = blocks;
699              ++_chunks_used;
700              _bytes_used += blocks * BLOCKSIZE;
701              return result;
702            }
703        }
704
705      /* At this point we have found a suitable free list entry.
706         Figure out how to remove what we need from the list. */
707      result = ADDRESS (block);
708      if (_heapinfo[block].free.size > blocks)
709        {
710          /* The block we found has a bit left over,
711             so relink the tail end back into the free list. */
712          _heapinfo[block + blocks].free.size
713            = _heapinfo[block].free.size - blocks;
714          _heapinfo[block + blocks].free.next
715            = _heapinfo[block].free.next;
716          _heapinfo[block + blocks].free.prev
717            = _heapinfo[block].free.prev;
718          _heapinfo[_heapinfo[block].free.prev].free.next
719            = _heapinfo[_heapinfo[block].free.next].free.prev
720            = _heapindex = block + blocks;
721        }
722      else
723        {
724          /* The block exactly matches our requirements,
725             so just remove it from the list. */
726          _heapinfo[_heapinfo[block].free.next].free.prev
727            = _heapinfo[block].free.prev;
728          _heapinfo[_heapinfo[block].free.prev].free.next
729            = _heapindex = _heapinfo[block].free.next;
730          --_chunks_free;
731        }
732
733      _heapinfo[block].busy.type = 0;
734      _heapinfo[block].busy.info.size = blocks;
735      ++_chunks_used;
736      _bytes_used += blocks * BLOCKSIZE;
737      _bytes_free -= blocks * BLOCKSIZE;
738
739      /* Mark all the blocks of the object just allocated except for the
740         first with a negative number so you can find the first block by
741         adding that adjustment.  */
742      while (--blocks > 0)
743        _heapinfo[block + blocks].busy.info.size = -blocks;
744    }
745
746  return result;
747}
748
749#ifndef _LIBC
750
751/* On some ANSI C systems, some libc functions call _malloc, _free
752   and _realloc.  Make them use the GNU functions.  */
753
754__ptr_t
755_malloc (size)
756     __malloc_size_t size;
757{
758  return malloc (size);
759}
760
761void
762_free (ptr)
763     __ptr_t ptr;
764{
765  free (ptr);
766}
767
768__ptr_t
769_realloc (ptr, size)
770     __ptr_t ptr;
771     __malloc_size_t size;
772{
773  return realloc (ptr, size);
774}
775
776#endif
777/* Free a block of memory allocated by `malloc'.
778   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
779                  Written May 1989 by Mike Haertel.
780
781This library is free software; you can redistribute it and/or
782modify it under the terms of the GNU Library General Public License as
783published by the Free Software Foundation; either version 2 of the
784License, or (at your option) any later version.
785
786This library is distributed in the hope that it will be useful,
787but WITHOUT ANY WARRANTY; without even the implied warranty of
788MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
789Library General Public License for more details.
790
791You should have received a copy of the GNU Library General Public
792License along with this library; see the file COPYING.LIB.  If
793not, write to the Free Software Foundation, Inc., 675 Mass Ave,
794Cambridge, MA 02139, USA.
795
796   The author may be reached (Email) at the address mike@ai.mit.edu,
797   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
798
799#ifndef _MALLOC_INTERNAL
800#define _MALLOC_INTERNAL
801#include <malloc.h>
802#endif
803
804/* Debugging hook for free.  */
805void (*__free_hook) __P ((__ptr_t __ptr));
806
807/* List of blocks allocated by memalign.  */
808struct alignlist *_aligned_blocks = NULL;
809
810/* Return memory to the heap.
811   Like `free' but don't call a __free_hook if there is one.  */
812void
813_free_internal (ptr)
814     __ptr_t ptr;
815{
816  int type;
817  __malloc_size_t block, blocks;
818  register __malloc_size_t i;
819  struct list *prev, *next;
820
821  block = BLOCK (ptr);
822
823  type = _heapinfo[block].busy.type;
824  switch (type)
825    {
826    case 0:
827      /* Get as many statistics as early as we can.  */
828      --_chunks_used;
829      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
830      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
831
832      /* Find the free cluster previous to this one in the free list.
833         Start searching at the last block referenced; this may benefit
834         programs with locality of allocation.  */
835      i = _heapindex;
836      if (i > block)
837        while (i > block)
838          i = _heapinfo[i].free.prev;
839      else
840        {
841          do
842            i = _heapinfo[i].free.next;
843          while (i > 0 && i < block);
844          i = _heapinfo[i].free.prev;
845        }
846
847      /* Determine how to link this block into the free list.  */
848      if (block == i + _heapinfo[i].free.size)
849        {
850          /* Coalesce this block with its predecessor.  */
851          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
852          block = i;
853        }
854      else
855        {
856          /* Really link this block back into the free list.  */
857          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
858          _heapinfo[block].free.next = _heapinfo[i].free.next;
859          _heapinfo[block].free.prev = i;
860          _heapinfo[i].free.next = block;
861          _heapinfo[_heapinfo[block].free.next].free.prev = block;
862          ++_chunks_free;
863        }
864
865      /* Now that the block is linked in, see if we can coalesce it
866         with its successor (by deleting its successor from the list
867         and adding in its size).  */
868      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
869        {
870          _heapinfo[block].free.size
871            += _heapinfo[_heapinfo[block].free.next].free.size;
872          _heapinfo[block].free.next
873            = _heapinfo[_heapinfo[block].free.next].free.next;
874          _heapinfo[_heapinfo[block].free.next].free.prev = block;
875          --_chunks_free;
876        }
877
878      /* Now see if we can return stuff to the system.  */
879      blocks = _heapinfo[block].free.size;
880      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
881          && (*__morecore) (0) == ADDRESS (block + blocks))
882        {
883          register __malloc_size_t bytes = blocks * BLOCKSIZE;
884          _heaplimit -= blocks;
885          (*__morecore) (-bytes);
886          _heapinfo[_heapinfo[block].free.prev].free.next
887            = _heapinfo[block].free.next;
888          _heapinfo[_heapinfo[block].free.next].free.prev
889            = _heapinfo[block].free.prev;
890          block = _heapinfo[block].free.prev;
891          --_chunks_free;
892          _bytes_free -= bytes;
893        }
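      /* Illustration (not part of the original source): with BLOCKSIZE == 4096
         and FINAL_FREE_BLOCKS == 8, a run of at least eight free blocks
         (32 KB) ending exactly at _heaplimit is handed back to the system by
         calling __morecore with a negative byte count, but only if
         (*__morecore) (0) shows that the break has not moved in the
         meantime.  */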
894
895      /* Set the next search to begin at this block.  */
896      _heapindex = block;
897      break;
898
899    default:
900      /* Do some of the statistics.  */
901      --_chunks_used;
902      _bytes_used -= 1 << type;
903      ++_chunks_free;
904      _bytes_free += 1 << type;
905
906      /* Get the address of the first free fragment in this block.  */
907      prev = (struct list *) ((char *) ADDRESS (block) +
908                           (_heapinfo[block].busy.info.frag.first << type));
909
910      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
911        {
912          /* If all fragments of this block are free, remove them
913             from the fragment list and free the whole block.  */
914          next = prev;
915          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
916            next = next->next;
917          prev->prev->next = next;
918          if (next != NULL)
919            next->prev = prev->prev;
920          _heapinfo[block].busy.type = 0;
921          _heapinfo[block].busy.info.size = 1;
922
923          /* Keep the statistics accurate.  */
924          ++_chunks_used;
925          _bytes_used += BLOCKSIZE;
926          _chunks_free -= BLOCKSIZE >> type;
927          _bytes_free -= BLOCKSIZE;
928
929          free (ADDRESS (block));
930        }
931      else if (_heapinfo[block].busy.info.frag.nfree != 0)
932        {
933          /* If some fragments of this block are free, link this
934             fragment into the fragment list after the first free
935             fragment of this block. */
936          next = (struct list *) ptr;
937          next->next = prev->next;
938          next->prev = prev;
939          prev->next = next;
940          if (next->next != NULL)
941            next->next->prev = next;
942          ++_heapinfo[block].busy.info.frag.nfree;
943        }
944      else
945        {
946          /* No fragments of this block are free, so link this
947             fragment into the fragment list and announce that
948             it is the first free fragment of this block. */
949          prev = (struct list *) ptr;
950          _heapinfo[block].busy.info.frag.nfree = 1;
951          _heapinfo[block].busy.info.frag.first = (unsigned long int)
952            ((unsigned long int) ((char *) ptr - (char *) NULL)
953             % BLOCKSIZE >> type);
954          prev->next = _fraghead[type].next;
955          prev->prev = &_fraghead[type];
956          prev->prev->next = prev;
957          if (prev->next != NULL)
958            prev->next->prev = prev;
959        }
960      break;
961    }
962}
963
964/* Return memory to the heap.  */
965void
966free (ptr)
967     __ptr_t ptr;
968{
969  register struct alignlist *l;
970
971  if (ptr == NULL)
972    return;
973
974  for (l = _aligned_blocks; l != NULL; l = l->next)
975    if (l->aligned == ptr)
976      {
977        l->aligned = NULL;      /* Mark the slot in the list as free.  */
978        ptr = l->exact;
979        break;
980      }
981
982  if (__free_hook != NULL)
983    (*__free_hook) (ptr);
984  else
985    _free_internal (ptr);
986}
987/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
988This file is part of the GNU C Library.
989
990The GNU C Library is free software; you can redistribute it and/or
991modify it under the terms of the GNU Library General Public License as
992published by the Free Software Foundation; either version 2 of the
993License, or (at your option) any later version.
994
995The GNU C Library is distributed in the hope that it will be useful,
996but WITHOUT ANY WARRANTY; without even the implied warranty of
997MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
998Library General Public License for more details.
999
1000You should have received a copy of the GNU Library General Public
1001License along with the GNU C Library; see the file COPYING.LIB.  If
1002not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1003Cambridge, MA 02139, USA.  */
1004
1005#ifndef _MALLOC_INTERNAL
1006#define _MALLOC_INTERNAL
1007#include <malloc.h>
1008#endif
1009
1010#ifdef _LIBC
1011
1012#include <ansidecl.h>
1013#include <gnu-stabs.h>
1014
1015#undef  cfree
1016
1017function_alias(cfree, free, void, (ptr),
1018               DEFUN(cfree, (ptr), PTR ptr))
1019
1020#else
1021
1022void
1023cfree (ptr)
1024     __ptr_t ptr;
1025{
1026  free (ptr);
1027}
1028
1029#endif
1030/* Change the size of a block allocated by `malloc'.
1031   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1032                     Written May 1989 by Mike Haertel.
1033
1034This library is free software; you can redistribute it and/or
1035modify it under the terms of the GNU Library General Public License as
1036published by the Free Software Foundation; either version 2 of the
1037License, or (at your option) any later version.
1038
1039This library is distributed in the hope that it will be useful,
1040but WITHOUT ANY WARRANTY; without even the implied warranty of
1041MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1042Library General Public License for more details.
1043
1044You should have received a copy of the GNU Library General Public
1045License along with this library; see the file COPYING.LIB.  If
1046not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1047Cambridge, MA 02139, USA.
1048
1049   The author may be reached (Email) at the address mike@ai.mit.edu,
1050   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1051
1052#ifndef _MALLOC_INTERNAL
1053#define _MALLOC_INTERNAL
1054#include <malloc.h>
1055#endif
1056
1057#if  (defined (MEMMOVE_MISSING) || \
1058      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1059
1060/* Snarfed directly from Emacs src/dispnew.c:
1061   XXX Should use system bcopy if it handles overlap.  */
1062#ifndef emacs
1063
1064/* Like bcopy except never gets confused by overlap.  */
1065
1066static void
1067safe_bcopy (from, to, size)
1068     char *from, *to;
1069     int size;
1070{
1071  if (size <= 0 || from == to)
1072    return;
1073
1074  /* If the source and destination don't overlap, then bcopy can
1075     handle it.  If they do overlap, but the destination is lower in
1076     memory than the source, we'll assume bcopy can handle that.  */
1077  if (to < from || from + size <= to)
1078    bcopy (from, to, size);
1079
1080  /* Otherwise, we'll copy from the end.  */
1081  else
1082    {
1083      register char *endf = from + size;
1084      register char *endt = to + size;
1085
1086      /* If TO - FROM is large, then we should break the copy into
1087         nonoverlapping chunks of TO - FROM bytes each.  However, if
1088         TO - FROM is small, then the bcopy function call overhead
1089         makes this not worth it.  The crossover point could be about
1090         anywhere.  Since I don't think the obvious copy loop is too
1091         bad, I'm trying to err in its favor.  */
1092      if (to - from < 64)
1093        {
1094          do
1095            *--endt = *--endf;
1096          while (endf != from);
1097        }
1098      else
1099        {
1100          for (;;)
1101            {
1102              endt -= (to - from);
1103              endf -= (to - from);
1104
1105              if (endt < to)
1106                break;
1107
1108              bcopy (endf, endt, to - from);
1109            }
1110
1111          /* If SIZE wasn't a multiple of TO - FROM, there will be a
1112             little left over.  The amount left over is
1113             (endt + (to - from)) - to, which is endt - from.  */
1114          bcopy (from, to, endt - from);
1115        }
1116    }
1117}     
1118#endif  /* Not emacs.  */
1119
1120#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1121
1122#endif
1123
1124
1125#define min(A, B) ((A) < (B) ? (A) : (B))
1126
1127/* Debugging hook for realloc.  */
1128__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1129
1130/* Resize the given region to the new size, returning a pointer
1131   to the (possibly moved) region.  This is optimized for speed;
1132   some benchmarks seem to indicate that greater compactness is
1133   achieved by unconditionally allocating and copying to a
1134   new region.  This module has incestuous knowledge of the
1135   internals of both free and malloc. */
1136__ptr_t
1137realloc (ptr, size)
1138     __ptr_t ptr;
1139     __malloc_size_t size;
1140{
1141  __ptr_t result;
1142  int type;
1143  __malloc_size_t block, blocks, oldlimit;
1144
1145  if (size == 0)
1146    {
1147      free (ptr);
1148      return malloc (0);
1149    }
1150  else if (ptr == NULL)
1151    return malloc (size);
1152
1153  if (__realloc_hook != NULL)
1154    return (*__realloc_hook) (ptr, size);
1155
1156  block = BLOCK (ptr);
1157
1158  type = _heapinfo[block].busy.type;
1159  switch (type)
1160    {
1161    case 0:
1162      /* Maybe reallocate a large block to a small fragment.  */
1163      if (size <= BLOCKSIZE / 2)
1164        {
1165          result = malloc (size);
1166          if (result != NULL)
1167            {
1168              memcpy (result, ptr, size);
1169              _free_internal (ptr);
1170              return result;
1171            }
1172        }
1173
1174      /* The new size is a large allocation as well;
1175         see if we can hold it in place. */
1176      blocks = BLOCKIFY (size);
1177      if (blocks < _heapinfo[block].busy.info.size)
1178        {
1179          /* The new size is smaller; return
1180             excess memory to the free list. */
1181          _heapinfo[block + blocks].busy.type = 0;
1182          _heapinfo[block + blocks].busy.info.size
1183            = _heapinfo[block].busy.info.size - blocks;
1184          _heapinfo[block].busy.info.size = blocks;
1185          /* We have just created a new chunk by splitting a chunk in two.
1186             Now we will free this chunk; increment the statistics counter
1187             so it doesn't become wrong when _free_internal decrements it.  */
1188          ++_chunks_used;
1189          _free_internal (ADDRESS (block + blocks));
1190          result = ptr;
1191        }
1192      else if (blocks == _heapinfo[block].busy.info.size)
1193        /* No size change necessary.  */
1194        result = ptr;
1195      else
1196        {
1197          /* Won't fit, so allocate a new region that will.
1198             Free the old region first in case there is sufficient
1199             adjacent free space to grow without moving. */
1200          blocks = _heapinfo[block].busy.info.size;
1201          /* Prevent free from actually returning memory to the system.  */
1202          oldlimit = _heaplimit;
1203          _heaplimit = 0;
1204          _free_internal (ptr);
1205          _heaplimit = oldlimit;
1206          result = malloc (size);
1207          if (result == NULL)
1208            {
1209              /* Now we're really in trouble.  We have to unfree
1210                 the thing we just freed.  Unfortunately it might
1211                 have been coalesced with its neighbors.  */
1212              if (_heapindex == block)
1213                (void) malloc (blocks * BLOCKSIZE);
1214              else
1215                {
1216                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1217                  (void) malloc (blocks * BLOCKSIZE);
1218                  _free_internal (previous);
1219                }
1220              return NULL;
1221            }
1222          if (ptr != result)
1223            memmove (result, ptr, blocks * BLOCKSIZE);
1224        }
1225      break;
1226
1227    default:
1228      /* Old size is a fragment; type is logarithm
1229         to base two of the fragment size.  */
1230      if (size > (__malloc_size_t) (1 << (type - 1)) &&
1231          size <= (__malloc_size_t) (1 << type))
1232        /* The new size is the same kind of fragment.  */
1233        result = ptr;
1234      else
1235        {
1236          /* The new size is different; allocate a new space,
1237             and copy the lesser of the new size and the old. */
1238          result = malloc (size);
1239          if (result == NULL)
1240            return NULL;
1241          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1242          free (ptr);
1243        }
1244      break;
1245    }
1246
1247  return result;
1248}
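/* Worked example (not part of the original source): a 100-byte object lives
   in a 128-byte fragment (type == 7).  realloc to any size in 65..128 keeps
   the pointer unchanged; realloc to 200 allocates a new 256-byte fragment,
   copies min (200, 128) == 128 bytes, and frees the old fragment.  */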
1249/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1250
1251This library is free software; you can redistribute it and/or
1252modify it under the terms of the GNU Library General Public License as
1253published by the Free Software Foundation; either version 2 of the
1254License, or (at your option) any later version.
1255
1256This library is distributed in the hope that it will be useful,
1257but WITHOUT ANY WARRANTY; without even the implied warranty of
1258MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1259Library General Public License for more details.
1260
1261You should have received a copy of the GNU Library General Public
1262License along with this library; see the file COPYING.LIB.  If
1263not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1264Cambridge, MA 02139, USA.
1265
1266   The author may be reached (Email) at the address mike@ai.mit.edu,
1267   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1268
1269#ifndef _MALLOC_INTERNAL
1270#define _MALLOC_INTERNAL
1271#include <malloc.h>
1272#endif
1273
1274/* Allocate an array of NMEMB elements each SIZE bytes long.
1275   The entire array is initialized to zeros.  */
1276__ptr_t
1277calloc (nmemb, size)
1278     register __malloc_size_t nmemb;
1279     register __malloc_size_t size;
1280{
1281  register __ptr_t result = malloc (nmemb * size);
1282
1283  if (result != NULL)
1284    (void) memset (result, 0, nmemb * size);
1285
1286  return result;
1287}
1288/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1289This file is part of the GNU C Library.
1290
1291The GNU C Library is free software; you can redistribute it and/or modify
1292it under the terms of the GNU General Public License as published by
1293the Free Software Foundation; either version 2, or (at your option)
1294any later version.
1295
1296The GNU C Library is distributed in the hope that it will be useful,
1297but WITHOUT ANY WARRANTY; without even the implied warranty of
1298MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1299GNU General Public License for more details.
1300
1301You should have received a copy of the GNU General Public License
1302along with the GNU C Library; see the file COPYING.  If not, write to
1303the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1304
1305#ifndef _MALLOC_INTERNAL
1306#define _MALLOC_INTERNAL
1307#include <malloc.h>
1308#endif
1309
1310#ifndef __GNU_LIBRARY__
1311#define __sbrk  sbrk
1312#endif
1313
1314#ifdef __GNU_LIBRARY__
1315#ifndef __GLIBC__
1316/* It is best not to declare this and cast its result on foreign operating
1317   systems with potentially hostile include files.  */
1318extern __ptr_t __sbrk __P ((int increment));
1319#endif
1320#endif
1321
1322#ifndef NULL
1323#define NULL 0
1324#endif
1325
1326/* Allocate INCREMENT more bytes of data space,
1327   and return the start of data space, or NULL on errors.
1328   If INCREMENT is negative, shrink data space.  */
1329__ptr_t
1330__default_morecore (increment)
1331#ifdef __STDC__
1332     ptrdiff_t increment;
1333#else
1334     int increment;
1335#endif
1336{
1337  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1338  if (result == (__ptr_t) -1)
1339    return NULL;
1340  return result;
1341}
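#if 0
/* Sketch, not part of the original source: __morecore is a replaceable hook,
   so the allocator can be pointed at something other than sbrk as long as
   successive calls return contiguous memory.  A toy replacement that carves
   space out of a static arena (the 1 MB size is an arbitrary choice for the
   example) might look like this; it would be installed with
   `__morecore = my_morecore;' before the first call to malloc.  */
static char my_arena[1 << 20];
static __malloc_size_t my_break;

static __ptr_t
my_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  char *previous = my_arena + my_break;
  if (increment < 0 && (__malloc_size_t) -increment > my_break)
    return NULL;
  if (increment > 0
      && my_break + (__malloc_size_t) increment > sizeof my_arena)
    return NULL;
  my_break += increment;
  return (__ptr_t) previous;   /* Like sbrk: return the old break.  */
}
#endif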
1342/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1343
1344This library is free software; you can redistribute it and/or
1345modify it under the terms of the GNU Library General Public License as
1346published by the Free Software Foundation; either version 2 of the
1347License, or (at your option) any later version.
1348
1349This library is distributed in the hope that it will be useful,
1350but WITHOUT ANY WARRANTY; without even the implied warranty of
1351MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1352Library General Public License for more details.
1353
1354You should have received a copy of the GNU Library General Public
1355License along with this library; see the file COPYING.LIB.  If
1356not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1357Cambridge, MA 02139, USA.  */
1358
1359#ifndef _MALLOC_INTERNAL
1360#define _MALLOC_INTERNAL
1361#include <malloc.h>
1362#endif
1363
1364__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1365
1366__ptr_t
1367memalign (alignment, size)
1368     __malloc_size_t alignment;
1369     __malloc_size_t size;
1370{
1371  __ptr_t result;
1372  unsigned long int adj;
1373
1374  if (__memalign_hook)
1375    return (*__memalign_hook) (alignment, size);
1376
1377  size = ((size + alignment - 1) / alignment) * alignment;
1378
1379  result = malloc (size);
1380  if (result == NULL)
1381    return NULL;
1382  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1383                                                  (char *) NULL)) % alignment;
1384  if (adj != 0)
1385    {
1386      struct alignlist *l;
1387      for (l = _aligned_blocks; l != NULL; l = l->next)
1388        if (l->aligned == NULL)
1389          /* This slot is free.  Use it.  */
1390          break;
1391      if (l == NULL)
1392        {
1393          l = (struct alignlist *) malloc (sizeof (struct alignlist));
1394          if (l == NULL)
1395            {
1396              free (result);
1397              return NULL;
1398            }
1399          l->next = _aligned_blocks;
1400          _aligned_blocks = l;
1401        }
1402      l->exact = result;
1403      result = l->aligned = (char *) result + alignment - adj;
1404    }
1405
1406  return result;
1407}
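/* Worked example (not part of the original source): memalign (32, 100) first
   rounds the size up to 128 (a multiple of 32) and mallocs that.  If the
   returned address is, say, 8 bytes past a 32-byte boundary, adj == 8 and the
   caller receives the address plus 32 - 8 == 24; the pair (aligned, exact) is
   recorded in _aligned_blocks so that free can translate the aligned address
   back to the block malloc actually returned.  */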
1408
1409#endif /* HAVE_GMALLOC */