source: git/Singular/gmalloc.c @ 393249

spielwiese
Last change on this file was 393249, checked in by Hans Schönemann <hannes@…>, 25 years ago
* hannes: 64-bit fixes git-svn-id: file:///usr/local/Singular/svn/trunk@3197 2c84dea3-7e68-4137-9b89-c4e89433aadc
File size: 41.0 KB
1/****************************************
2*  Computer Algebra System SINGULAR     *
3****************************************/
4/* $Id: */
5
6/* gmalloc, used by Singular to provide a trusted malloc and valloc;
7   slightly edited to include mod2.h and to provide its functionality
8   only if HAVE_GMALLOC is defined.
9*/
10
11#ifdef HAVE_CONFIG_H
12#include "mod2.h"
13#endif
14
15/* #ifdef HAVE_GMALLOC */
16#if 1
17
18#define __USE_XOPEN
19#define __USE_XOPEN_EXTENDED
20#define _MALLOC_INTERNAL
21
22/* The malloc headers and source files from the C library follow here.  */
23
24/* Declarations for `malloc' and friends.
25   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
26                  Written May 1989 by Mike Haertel.
27
28This library is free software; you can redistribute it and/or
29modify it under the terms of the GNU Library General Public License as
30published by the Free Software Foundation; either version 2 of the
31License, or (at your option) any later version.
32
33This library is distributed in the hope that it will be useful,
34but WITHOUT ANY WARRANTY; without even the implied warranty of
35MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
36Library General Public License for more details.
37
38You should have received a copy of the GNU Library General Public
39License along with this library; see the file COPYING.LIB.  If
40not, write to the Free Software Foundation, Inc., 675 Mass Ave,
41Cambridge, MA 02139, USA.
42
43   The author may be reached (Email) at the address mike@ai.mit.edu,
44   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
45
46#ifndef _MALLOC_H
47
48#define _MALLOC_H       1
49
50#ifdef _MALLOC_INTERNAL
51
52#if     defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
53#include <string.h>
54#else
55#ifndef memset
56#define memset(s, zero, n)      bzero ((s), (n))
57#endif
58#ifndef memcpy
59#define memcpy(d, s, n)         bcopy ((s), (d), (n))
60#endif
61#endif
62
63#if     defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
64#include <limits.h>
65#else
66#ifndef CHAR_BIT
67#define CHAR_BIT        8
68#endif
69#endif
70
71#ifdef  HAVE_UNISTD_H
72#include <unistd.h>
73#endif
74
75#endif  /* _MALLOC_INTERNAL.  */
76
77
78#ifdef  __cplusplus
79extern "C"
80{
81#endif
82
83#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
84#undef  __P
85#define __P(args)       args
86#undef  __ptr_t
87#define __ptr_t         void *
88#else /* Not C++ or ANSI C.  */
89#undef  __P
90#define __P(args)       ()
91#undef  const
92#define const
93#undef  __ptr_t
94#define __ptr_t         char *
95#endif /* C++ or ANSI C.  */
96
97#if defined (__STDC__) && __STDC__
98#include <stddef.h>
99#define __malloc_size_t         size_t
100#define __malloc_ptrdiff_t      ptrdiff_t
101#else
102#define __malloc_size_t         unsigned int
103#define __malloc_ptrdiff_t      int
104#endif
105
106#ifndef NULL
107#define NULL    0
108#endif
109
110
111/* Allocate SIZE bytes of memory.  */
112extern __ptr_t malloc __P ((__malloc_size_t __size));
113/* Re-allocate the previously allocated block
114   pointed to by __ptr, making the new block SIZE bytes long.  */
115extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
116/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
117extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
118/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
119extern void free __P ((__ptr_t __ptr));
120
121/* Allocate SIZE bytes on an ALIGNMENT-byte boundary.  */
122extern __ptr_t memalign __P ((__malloc_size_t __alignment,
123                              __malloc_size_t __size));
124
125/* Allocate SIZE bytes on a page boundary.  */
126extern __ptr_t valloc __P ((__malloc_size_t __size));
127
128
129#ifdef _MALLOC_INTERNAL
130
131/* The allocator divides the heap into blocks of fixed size; large
132   requests receive one or more whole blocks, and small requests
133   receive a fragment of a block.  Fragment sizes are powers of two,
134   and all fragments of a block are the same size.  When all the
135   fragments in a block have been freed, the block itself is freed.  */
136#define INT_BIT         (CHAR_BIT * sizeof(int))
137#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
138#define BLOCKSIZE       (1 << BLOCKLOG)
139#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
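
#ifdef GMALLOC_EXAMPLES
/* A worked sketch of the sizing arithmetic above, assuming 32-bit ints
   (INT_BIT == 32, so BLOCKLOG == 12 and BLOCKSIZE == 4096); GMALLOC_EXAMPLES
   is a hypothetical guard, so none of this is compiled into Singular.  */
static void
example_sizing (void)
{
  /* A 100-byte request is "small" (at most BLOCKSIZE / 2): it is served
     from a 128-byte fragment, the smallest power of two holding 100 bytes.  */
  __malloc_size_t small_request = 100;
  /* A 5000-byte request is "large": BLOCKIFY (5000) == 2 whole blocks.  */
  __malloc_size_t large_blocks = BLOCKIFY (5000);
  (void) small_request;
  (void) large_blocks;
}
#endif /* GMALLOC_EXAMPLES */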
140
141/* Determine the amount of memory spanned by the initial heap table
142   (not an absolute limit).  */
143#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)
144
145/* Number of contiguous free blocks allowed to build up at the end of
146   memory before they will be returned to the system.  */
147#define FINAL_FREE_BLOCKS       8
148
149/* Data structure giving per-block information.  */
150typedef union
151  {
152    /* Heap information for a busy block.  */
153    struct
154      {
155        /* Zero for a large (multiblock) object, or positive giving the
156           logarithm to the base two of the fragment size.  */
157        int type;
158        union
159          {
160            struct
161              {
162                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
163                __malloc_size_t first; /* First free fragment of the block.  */
164              } frag;
165            /* For a large object, in its first block, this has the number
166               of blocks in the object.  In the other blocks, this has a
167               negative number which says how far back the first block is.  */
168            __malloc_ptrdiff_t size;
169          } info;
170      } busy;
171    /* Heap information for a free block
172       (that may be the first of a free cluster).  */
173    struct
174      {
175        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
176        __malloc_size_t next;   /* Index of next free cluster.  */
177        __malloc_size_t prev;   /* Index of previous free cluster.  */
178      } free;
179  } malloc_info;
180
181/* Pointer to first block of the heap.  */
182extern char *_heapbase;
183
184/* Table indexed by block number giving per-block information.  */
185extern malloc_info *_heapinfo;
186
187/* Address to block number and vice versa.  */
188#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
189#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
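
#ifdef GMALLOC_EXAMPLES
/* Sketch of how BLOCK and ADDRESS invert each other (GMALLOC_EXAMPLES is a
   hypothetical guard; meaningful once _heapbase has been set by the first
   call to malloc).  Block numbers start at 1, and every address inside a
   block maps back to the same index, which is what free relies on when it
   is handed a fragment from the middle of a block.  */
static void
example_block_address (void)
{
  __malloc_size_t b = 5;
  char *a = (char *) ADDRESS (b);
  /* BLOCK (a) == 5, and so is BLOCK (a + BLOCKSIZE - 1).  */
  (void) a;
}
#endif /* GMALLOC_EXAMPLES */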
190
191/* Current search index for the heap table.  */
192extern __malloc_size_t _heapindex;
193
194/* Limit of valid info table indices.  */
195extern __malloc_size_t _heaplimit;
196
197/* Doubly linked lists of free fragments.  */
198struct list
199  {
200    struct list *next;
201    struct list *prev;
202  };
203
204/* Free list headers for each fragment size.  */
205extern struct list _fraghead[];
206
207/* List of blocks allocated with `memalign' (or `valloc').  */
208struct alignlist
209  {
210    struct alignlist *next;
211    __ptr_t aligned;            /* The address that memalign returned.  */
212    __ptr_t exact;              /* The address that malloc returned.  */
213  };
214extern struct alignlist *_aligned_blocks;
215
216/* Instrumentation.  */
217extern __malloc_size_t _chunks_used;
218extern __malloc_size_t _bytes_used;
219extern __malloc_size_t _chunks_free;
220extern __malloc_size_t _bytes_free;
221
222/* Internal version of `free' used in `morecore' (malloc.c). */
223extern void _free_internal __P ((__ptr_t __ptr));
224
225#endif /* _MALLOC_INTERNAL.  */
226
227/* Given an address in the middle of a malloc'd object,
228   return the address of the beginning of the object.  */
229extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
230
231/* Underlying allocation function; successive calls should
232   return contiguous pieces of memory.  */
233extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
234
235/* Default value of `__morecore'.  */
236extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
237
238/* If not NULL, this function is called after each time
239   `__morecore' is called to increase the data size.  */
240extern void (*__after_morecore_hook) __P ((void));
241
242/* Nonzero if `malloc' has been called and done its initialization.  */
243extern int __malloc_initialized;
244
245/* Hooks for debugging versions.  */
246extern void (*__malloc_initialize_hook) __P ((void));
247extern void (*__free_hook) __P ((__ptr_t __ptr));
248extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
249extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
250extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
251                                        __malloc_size_t __alignment));
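
#ifdef GMALLOC_EXAMPLES
/* Sketch of the debugging-hook protocol declared above (GMALLOC_EXAMPLES is
   a hypothetical guard; ANSI C assumed).  malloc calls the hook *instead of*
   doing the allocation itself, so a hook that only wants to observe must
   unhook itself, call malloc, and then re-install itself.  */
static __malloc_size_t example_malloc_calls;

static __ptr_t
example_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;
  __malloc_hook = NULL;                 /* avoid recursing into this hook */
  ++example_malloc_calls;
  result = malloc (size);
  __malloc_hook = example_malloc_hook;  /* hook the next call again */
  return result;
}
#endif /* GMALLOC_EXAMPLES */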
252
253/* Return values for `mprobe': these are the kinds of inconsistencies that
254   `mcheck' enables detection of.  */
255enum mcheck_status
256  {
257    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
258    MCHECK_OK,                  /* Block is fine.  */
259    MCHECK_FREE,                /* Block freed twice.  */
260    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
261    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
262  };
263
264/* Activate a standard collection of debugging hooks.  This must be called
265   before `malloc' is ever called.  ABORTFUNC is called with an error code
266   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
267   null, the standard function prints on stderr and then calls `abort'.  */
268extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
269
270/* Check for aberrations in a particular malloc'd block.  You must have
271   called `mcheck' already.  These are the same checks that `mcheck' does
272   when you free or reallocate a block.  */
273extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
274
275/* Activate a standard collection of tracing hooks.  */
276extern void mtrace __P ((void));
277extern void muntrace __P ((void));
278
279/* Statistics available to the user.  */
280struct mstats
281  {
282    __malloc_size_t bytes_total; /* Total size of the heap. */
283    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
284    __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
285    __malloc_size_t chunks_free; /* Chunks in the free list. */
286    __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
287  };
288
289/* Pick up the current statistics. */
290extern struct mstats mstats __P ((void));
291
292/* Call WARNFUN with a warning message when memory usage is high.  */
293extern void memory_warnings __P ((__ptr_t __start,
294                                  void (*__warnfun) __P ((const char *))));
295
296
297/* Relocating allocator.  */
298
299/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
300extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
301
302/* Free the storage allocated in HANDLEPTR.  */
303extern void r_alloc_free __P ((__ptr_t *__handleptr));
304
305/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
306extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
307
308
309#ifdef  __cplusplus
310}
311#endif
312
313#endif /* malloc.h  */
314/* Allocate memory on a page boundary.
315   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
316
317This library is free software; you can redistribute it and/or
318modify it under the terms of the GNU Library General Public License as
319published by the Free Software Foundation; either version 2 of the
320License, or (at your option) any later version.
321
322This library is distributed in the hope that it will be useful,
323but WITHOUT ANY WARRANTY; without even the implied warranty of
324MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
325Library General Public License for more details.
326
327You should have received a copy of the GNU Library General Public
328License along with this library; see the file COPYING.LIB.  If
329not, write to the Free Software Foundation, Inc., 675 Mass Ave,
330Cambridge, MA 02139, USA.
331
332   The author may be reached (Email) at the address mike@ai.mit.edu,
333   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
334
335#if defined (__GNU_LIBRARY__) || defined (_LIBC)
336#include <stddef.h>
337#include <sys/cdefs.h>
338/* obachman: no declaration: conflicts with gnulibc6 unistd.h */
339/* extern size_t __getpagesize __P ((void)); */
340#else
341#if 0 /* obachman: pasted in getpagesize.h manually */
342#include "getpagesize.h"
343#else
344
345#ifdef VMS
346#define getpagesize() 512
347#endif
348
349#ifdef HAVE_UNISTD_H
350#include <unistd.h>
351#endif
352
353#ifdef _SC_PAGESIZE
354#define getpagesize() sysconf(_SC_PAGESIZE)
355#else
356
357#include <sys/param.h>
358
359#ifdef EXEC_PAGESIZE
360#define getpagesize() EXEC_PAGESIZE
361#else
362#ifdef NBPG
363#define getpagesize() NBPG * CLSIZE
364#ifndef CLSIZE
365#define CLSIZE 1
366#endif /* no CLSIZE */
367#else /* no NBPG */
368#ifdef NBPC
369#define getpagesize() NBPC
370#else /* no NBPC */
371#ifdef PAGESIZE
372#define getpagesize() PAGESIZE
373#endif
374#endif /* NBPC */
375#endif /* no NBPG */
376#endif /* no EXEC_PAGESIZE */
377#endif /* no _SC_PAGESIZE */
378
379/* obachman: undef , gnulibc6 conflict with unistd.h */
380#define  __getpagesize()        getpagesize()
381#endif /* if 0 */
382#endif
383
384#ifndef _MALLOC_INTERNAL
385#define _MALLOC_INTERNAL
386#include <malloc.h>
387#endif
388
389static __malloc_size_t pagesize;
390
391__ptr_t
392valloc (size)
393     __malloc_size_t size;
394{
395  if (pagesize == 0)
396/* obachman: use getpagesize, instead
397    pagesize = __getpagesize ();
398*/
399    pagesize = getpagesize ();
400
401  return memalign (pagesize, size);
402}
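
#ifdef GMALLOC_EXAMPLES
/* Usage sketch for valloc above (GMALLOC_EXAMPLES is a hypothetical guard):
   the returned address is a multiple of the system page size.  */
static void
example_valloc (void)
{
  __malloc_size_t page = getpagesize ();
  char *buf = (char *) valloc (3 * page);
  /* ((unsigned long) buf % page) == 0 here (when buf != NULL).  */
  free (buf);
}
#endif /* GMALLOC_EXAMPLES */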
403/* Memory allocator `malloc'.
404   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
405                  Written May 1989 by Mike Haertel.
406
407This library is free software; you can redistribute it and/or
408modify it under the terms of the GNU Library General Public License as
409published by the Free Software Foundation; either version 2 of the
410License, or (at your option) any later version.
411
412This library is distributed in the hope that it will be useful,
413but WITHOUT ANY WARRANTY; without even the implied warranty of
414MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
415Library General Public License for more details.
416
417You should have received a copy of the GNU Library General Public
418License along with this library; see the file COPYING.LIB.  If
419not, write to the Free Software Foundation, Inc., 675 Mass Ave,
420Cambridge, MA 02139, USA.
421
422   The author may be reached (Email) at the address mike@ai.mit.edu,
423   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
424
425#ifndef _MALLOC_INTERNAL
426#define _MALLOC_INTERNAL
427#include <malloc.h>
428#endif
429
430/* How to really get more memory.  */
431__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
432
433/* Debugging hook for `malloc'.  */
434__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
435
436/* Pointer to the base of the first block.  */
437char *_heapbase;
438
439/* Block information table.  Allocated with align/__free (not malloc/free).  */
440malloc_info *_heapinfo;
441
442/* Number of info entries.  */
443static __malloc_size_t heapsize;
444
445/* Search index in the info table.  */
446__malloc_size_t _heapindex;
447
448/* Limit of valid info table indices.  */
449__malloc_size_t _heaplimit;
450
451/* Free lists for each fragment size.  */
452struct list _fraghead[BLOCKLOG];
453
454/* Instrumentation.  */
455__malloc_size_t _chunks_used;
456__malloc_size_t _bytes_used;
457__malloc_size_t _chunks_free;
458__malloc_size_t _bytes_free;
459
460/* Are you experienced?  */
461int __malloc_initialized;
462
463void (*__malloc_initialize_hook) __P ((void));
464void (*__after_morecore_hook) __P ((void));
465
466/* Aligned allocation.  */
467static __ptr_t align __P ((__malloc_size_t));
468static __ptr_t
469align (size)
470     __malloc_size_t size;
471{
472  __ptr_t result;
473  unsigned long int adj;
474
475  result = (*__morecore) (size);
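  /* adj is how far the fresh core sits past the last BLOCKSIZE boundary;
     if it is not on a boundary, grab enough extra core to reach the next
     boundary and return that aligned address instead.  */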
476  adj = (unsigned long int) ((unsigned long int) ((char *) result -
477                                                  (char *) NULL)) % BLOCKSIZE;
478  if (adj != 0)
479    {
480      adj = BLOCKSIZE - adj;
481      (void) (*__morecore) (adj);
482      result = (char *) result + adj;
483    }
484
485  if (__after_morecore_hook)
486    (*__after_morecore_hook) ();
487
488  return result;
489}
490
491/* Set everything up and remember that we have.  */
492static int initialize __P ((void));
493static int
494initialize ()
495{
496  if (__malloc_initialize_hook)
497    (*__malloc_initialize_hook) ();
498
499  heapsize = HEAP / BLOCKSIZE;
500  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
501  if (_heapinfo == NULL)
502    return 0;
503  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
504  _heapinfo[0].free.size = 0;
505  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
506  _heapindex = 0;
507  _heapbase = (char *) _heapinfo;
508
509  /* Account for the _heapinfo block itself in the statistics.  */
510  _bytes_used = heapsize * sizeof (malloc_info);
511  _chunks_used = 1;
512
513  __malloc_initialized = 1;
514  return 1;
515}
516
517/* Get neatly aligned memory, initializing or
518   growing the heap info table as necessary. */
519static __ptr_t morecore __P ((__malloc_size_t));
520static __ptr_t
521morecore (size)
522     __malloc_size_t size;
523{
524  __ptr_t result;
525  malloc_info *newinfo, *oldinfo;
526  __malloc_size_t newsize;
527
528  result = align (size);
529  if (result == NULL)
530    return NULL;
531
532  /* Check if we need to grow the info table.  */
533  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
534    {
535      newsize = heapsize;
536      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
537        newsize *= 2;
538      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
539      if (newinfo == NULL)
540        {
541          (*__morecore) (-size);
542          return NULL;
543        }
544      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
545      memset (&newinfo[heapsize], 0,
546              (newsize - heapsize) * sizeof (malloc_info));
547      oldinfo = _heapinfo;
548      newinfo[BLOCK (oldinfo)].busy.type = 0;
549      newinfo[BLOCK (oldinfo)].busy.info.size
550        = BLOCKIFY (heapsize * sizeof (malloc_info));
551      _heapinfo = newinfo;
552      /* Account for the _heapinfo block itself in the statistics.  */
553      _bytes_used += newsize * sizeof (malloc_info);
554      ++_chunks_used;
555      _free_internal (oldinfo);
556      heapsize = newsize;
557    }
558
559  _heaplimit = BLOCK ((char *) result + size);
560  return result;
561}
562
563/* Allocate memory from the heap.  */
564__ptr_t
565malloc (size)
566     __malloc_size_t size;
567{
568  __ptr_t result;
569  __malloc_size_t block, blocks, lastblocks, start;
570  register __malloc_size_t i;
571  struct list *next;
572
573  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
574     valid address you can realloc and free (though not dereference).
575
576     It turns out that some extant code (sunrpc, at least Ultrix's version)
577     expects `malloc (0)' to return non-NULL and breaks otherwise.
578     Be compatible.  */
579
580#if     0
581  if (size == 0)
582    return NULL;
583#endif
584
585  if (__malloc_hook != NULL)
586    return (*__malloc_hook) (size);
587
588  if (!__malloc_initialized)
589    if (!initialize ())
590      return NULL;
591
592  if (size < sizeof (struct list))
593    size = sizeof (struct list);
594
595#ifdef SUNOS_LOCALTIME_BUG
596  if (size < 16)
597    size = 16;
598#endif
599
600  /* Determine the allocation policy based on the request size.  */
601  if (size <= BLOCKSIZE / 2)
602    {
603      /* Small allocation to receive a fragment of a block.
604         Determine the logarithm to base two of the fragment size. */
605      register __malloc_size_t log = 1;
606      --size;
607      while ((size /= 2) != 0)
608        ++log;
609
610      /* Look in the fragment lists for a
611         free fragment of the desired size. */
612      next = _fraghead[log].next;
613      if (next != NULL)
614        {
615          /* There are free fragments of this size.
616             Pop a fragment out of the fragment list and return it.
617             Update the block's nfree and first counters. */
618          result = (__ptr_t) next;
619          next->prev->next = next->next;
620          if (next->next != NULL)
621            next->next->prev = next->prev;
622          block = BLOCK (result);
623          if (--_heapinfo[block].busy.info.frag.nfree != 0)
624            _heapinfo[block].busy.info.frag.first = (unsigned long int)
625              ((unsigned long int) ((char *) next->next - (char *) NULL)
626               % BLOCKSIZE) >> log;
627
628          /* Update the statistics.  */
629          ++_chunks_used;
630          _bytes_used += 1 << log;
631          --_chunks_free;
632          _bytes_free -= 1 << log;
633        }
634      else
635        {
636          /* No free fragments of the desired size, so get a new block
637             and break it into fragments, returning the first.  */
638          result = malloc (BLOCKSIZE);
639          if (result == NULL)
640            return NULL;
641
642          /* Link all fragments but the first into the free list.  */
643          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
644            {
645              next = (struct list *) ((char *) result + (i << log));
646              next->next = _fraghead[log].next;
647              next->prev = &_fraghead[log];
648              next->prev->next = next;
649              if (next->next != NULL)
650                next->next->prev = next;
651            }
652
653          /* Initialize the nfree and first counters for this block.  */
654          block = BLOCK (result);
655          _heapinfo[block].busy.type = log;
656          _heapinfo[block].busy.info.frag.nfree = i - 1;
657          _heapinfo[block].busy.info.frag.first = i - 1;
658
659          _chunks_free += (BLOCKSIZE >> log) - 1;
660          _bytes_free += BLOCKSIZE - (1 << log);
661          _bytes_used -= BLOCKSIZE - (1 << log);
662        }
663    }
664  else
665    {
666      /* Large allocation to receive one or more blocks.
667         Search the free list in a circle starting at the last place visited.
668         If we loop completely around without finding a large enough
669         space we will have to get more memory from the system.  */
670      blocks = BLOCKIFY (size);
671      start = block = _heapindex;
672      while (_heapinfo[block].free.size < blocks)
673        {
674          block = _heapinfo[block].free.next;
675          if (block == start)
676            {
677              /* Need to get more from the system.  Check to see if
678                 the new core will be contiguous with the final free
679                 block; if so we don't need to get as much.  */
680              block = _heapinfo[0].free.prev;
681              lastblocks = _heapinfo[block].free.size;
682              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
683                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
684                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
685                {
686                  /* Which block we are extending (the `final free
687                     block' referred to above) might have changed, if
688                     it got combined with a freed info table.  */
689                  block = _heapinfo[0].free.prev;
690                  _heapinfo[block].free.size += (blocks - lastblocks);
691                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
692                  continue;
693                }
694              result = morecore (blocks * BLOCKSIZE);
695              if (result == NULL)
696                return NULL;
697              block = BLOCK (result);
698              _heapinfo[block].busy.type = 0;
699              _heapinfo[block].busy.info.size = blocks;
700              ++_chunks_used;
701              _bytes_used += blocks * BLOCKSIZE;
702              return result;
703            }
704        }
705
706      /* At this point we have found a suitable free list entry.
707         Figure out how to remove what we need from the list. */
708      result = ADDRESS (block);
709      if (_heapinfo[block].free.size > blocks)
710        {
711          /* The block we found has a bit left over,
712             so relink the tail end back into the free list. */
713          _heapinfo[block + blocks].free.size
714            = _heapinfo[block].free.size - blocks;
715          _heapinfo[block + blocks].free.next
716            = _heapinfo[block].free.next;
717          _heapinfo[block + blocks].free.prev
718            = _heapinfo[block].free.prev;
719          _heapinfo[_heapinfo[block].free.prev].free.next
720            = _heapinfo[_heapinfo[block].free.next].free.prev
721            = _heapindex = block + blocks;
722        }
723      else
724        {
725          /* The block exactly matches our requirements,
726             so just remove it from the list. */
727          _heapinfo[_heapinfo[block].free.next].free.prev
728            = _heapinfo[block].free.prev;
729          _heapinfo[_heapinfo[block].free.prev].free.next
730            = _heapindex = _heapinfo[block].free.next;
731          --_chunks_free;
732        }
733
734      _heapinfo[block].busy.type = 0;
735      _heapinfo[block].busy.info.size = blocks;
736      ++_chunks_used;
737      _bytes_used += blocks * BLOCKSIZE;
738      _bytes_free -= blocks * BLOCKSIZE;
739
740      /* Mark all the blocks of the object just allocated except for the
741         first with a negative number so you can find the first block by
742         adding that adjustment.  */
743      while (--blocks > 0)
744        _heapinfo[block + blocks].busy.info.size = -blocks;
745    }
746
747  return result;
748}
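
#ifdef GMALLOC_EXAMPLES
/* Sketch of the two allocation policies implemented above (GMALLOC_EXAMPLES
   is a hypothetical guard): requests of at most BLOCKSIZE / 2 bytes come out
   of a fragmented block, anything larger is given whole blocks.  */
static void
example_malloc_policies (void)
{
  __ptr_t small_ptr = malloc (BLOCKSIZE / 2);      /* fragment path */
  __ptr_t large_ptr = malloc (BLOCKSIZE / 2 + 1);  /* whole-block path */
  free (small_ptr);
  free (large_ptr);
}
#endif /* GMALLOC_EXAMPLES */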
749
750#ifndef _LIBC
751
752/* On some ANSI C systems, some libc functions call _malloc, _free
753   and _realloc.  Make them use the GNU functions.  */
754
755__ptr_t
756_malloc (size)
757     __malloc_size_t size;
758{
759  return malloc (size);
760}
761
762void
763_free (ptr)
764     __ptr_t ptr;
765{
766  free (ptr);
767}
768
769__ptr_t
770_realloc (ptr, size)
771     __ptr_t ptr;
772     __malloc_size_t size;
773{
774  return realloc (ptr, size);
775}
776
777#endif
778/* Free a block of memory allocated by `malloc'.
779   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
780                  Written May 1989 by Mike Haertel.
781
782This library is free software; you can redistribute it and/or
783modify it under the terms of the GNU Library General Public License as
784published by the Free Software Foundation; either version 2 of the
785License, or (at your option) any later version.
786
787This library is distributed in the hope that it will be useful,
788but WITHOUT ANY WARRANTY; without even the implied warranty of
789MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
790Library General Public License for more details.
791
792You should have received a copy of the GNU Library General Public
793License along with this library; see the file COPYING.LIB.  If
794not, write to the Free Software Foundation, Inc., 675 Mass Ave,
795Cambridge, MA 02139, USA.
796
797   The author may be reached (Email) at the address mike@ai.mit.edu,
798   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
799
800#ifndef _MALLOC_INTERNAL
801#define _MALLOC_INTERNAL
802#include <malloc.h>
803#endif
804
805/* Debugging hook for free.  */
806void (*__free_hook) __P ((__ptr_t __ptr));
807
808/* List of blocks allocated by memalign.  */
809struct alignlist *_aligned_blocks = NULL;
810
811/* Return memory to the heap.
812   Like `free' but don't call a __free_hook if there is one.  */
813void
814_free_internal (ptr)
815     __ptr_t ptr;
816{
817  int type;
818  __malloc_size_t block, blocks;
819  register __malloc_size_t i;
820  struct list *prev, *next;
821
822  block = BLOCK (ptr);
823
824  type = _heapinfo[block].busy.type;
825  switch (type)
826    {
827    case 0:
828      /* Get as many statistics as early as we can.  */
829      --_chunks_used;
830      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
831      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
832
833      /* Find the free cluster previous to this one in the free list.
834         Start searching at the last block referenced; this may benefit
835         programs with locality of allocation.  */
836      i = _heapindex;
837      if (i > block)
838        while (i > block)
839          i = _heapinfo[i].free.prev;
840      else
841        {
842          do
843            i = _heapinfo[i].free.next;
844          while (i > 0 && i < block);
845          i = _heapinfo[i].free.prev;
846        }
847
848      /* Determine how to link this block into the free list.  */
849      if (block == i + _heapinfo[i].free.size)
850        {
851          /* Coalesce this block with its predecessor.  */
852          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
853          block = i;
854        }
855      else
856        {
857          /* Really link this block back into the free list.  */
858          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
859          _heapinfo[block].free.next = _heapinfo[i].free.next;
860          _heapinfo[block].free.prev = i;
861          _heapinfo[i].free.next = block;
862          _heapinfo[_heapinfo[block].free.next].free.prev = block;
863          ++_chunks_free;
864        }
865
866      /* Now that the block is linked in, see if we can coalesce it
867         with its successor (by deleting its successor from the list
868         and adding in its size).  */
869      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
870        {
871          _heapinfo[block].free.size
872            += _heapinfo[_heapinfo[block].free.next].free.size;
873          _heapinfo[block].free.next
874            = _heapinfo[_heapinfo[block].free.next].free.next;
875          _heapinfo[_heapinfo[block].free.next].free.prev = block;
876          --_chunks_free;
877        }
878
879      /* Now see if we can return stuff to the system.  */
880      blocks = _heapinfo[block].free.size;
881      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
882          && (*__morecore) (0) == ADDRESS (block + blocks))
883        {
884          register __malloc_size_t bytes = blocks * BLOCKSIZE;
885          _heaplimit -= blocks;
886          (*__morecore) (-bytes);
887          _heapinfo[_heapinfo[block].free.prev].free.next
888            = _heapinfo[block].free.next;
889          _heapinfo[_heapinfo[block].free.next].free.prev
890            = _heapinfo[block].free.prev;
891          block = _heapinfo[block].free.prev;
892          --_chunks_free;
893          _bytes_free -= bytes;
894        }
895
896      /* Set the next search to begin at this block.  */
897      _heapindex = block;
898      break;
899
900    default:
901      /* Do some of the statistics.  */
902      --_chunks_used;
903      _bytes_used -= 1 << type;
904      ++_chunks_free;
905      _bytes_free += 1 << type;
906
907      /* Get the address of the first free fragment in this block.  */
908      prev = (struct list *) ((char *) ADDRESS (block) +
909                           (_heapinfo[block].busy.info.frag.first << type));
910
911      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
912        {
913          /* If all fragments of this block are free, remove them
914             from the fragment list and free the whole block.  */
915          next = prev;
916          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
917            next = next->next;
918          prev->prev->next = next;
919          if (next != NULL)
920            next->prev = prev->prev;
921          _heapinfo[block].busy.type = 0;
922          _heapinfo[block].busy.info.size = 1;
923
924          /* Keep the statistics accurate.  */
925          ++_chunks_used;
926          _bytes_used += BLOCKSIZE;
927          _chunks_free -= BLOCKSIZE >> type;
928          _bytes_free -= BLOCKSIZE;
929
930          free (ADDRESS (block));
931        }
932      else if (_heapinfo[block].busy.info.frag.nfree != 0)
933        {
934          /* If some fragments of this block are free, link this
935             fragment into the fragment list after the first free
936             fragment of this block. */
937          next = (struct list *) ptr;
938          next->next = prev->next;
939          next->prev = prev;
940          prev->next = next;
941          if (next->next != NULL)
942            next->next->prev = next;
943          ++_heapinfo[block].busy.info.frag.nfree;
944        }
945      else
946        {
947          /* No fragments of this block are free, so link this
948             fragment into the fragment list and announce that
949             it is the first free fragment of this block. */
950          prev = (struct list *) ptr;
951          _heapinfo[block].busy.info.frag.nfree = 1;
952          _heapinfo[block].busy.info.frag.first = (unsigned long int)
953            ((unsigned long int) ((char *) ptr - (char *) NULL)
954             % BLOCKSIZE >> type);
955          prev->next = _fraghead[type].next;
956          prev->prev = &_fraghead[type];
957          prev->prev->next = prev;
958          if (prev->next != NULL)
959            prev->next->prev = prev;
960        }
961      break;
962    }
963}
964
965/* Return memory to the heap.  */
966void
967free (ptr)
968     __ptr_t ptr;
969{
970  register struct alignlist *l;
971
972  if (ptr == NULL)
973    return;
974
975  for (l = _aligned_blocks; l != NULL; l = l->next)
976    if (l->aligned == ptr)
977      {
978        l->aligned = NULL;      /* Mark the slot in the list as free.  */
979        ptr = l->exact;
980        break;
981      }
982
983  if (__free_hook != NULL)
984    (*__free_hook) (ptr);
985  else
986    _free_internal (ptr);
987}
988/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
989This file is part of the GNU C Library.
990
991The GNU C Library is free software; you can redistribute it and/or
992modify it under the terms of the GNU Library General Public License as
993published by the Free Software Foundation; either version 2 of the
994License, or (at your option) any later version.
995
996The GNU C Library is distributed in the hope that it will be useful,
997but WITHOUT ANY WARRANTY; without even the implied warranty of
998MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
999Library General Public License for more details.
1000
1001You should have received a copy of the GNU Library General Public
1002License along with the GNU C Library; see the file COPYING.LIB.  If
1003not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1004Cambridge, MA 02139, USA.  */
1005
1006#ifndef _MALLOC_INTERNAL
1007#define _MALLOC_INTERNAL
1008#include <malloc.h>
1009#endif
1010
1011#ifdef _LIBC
1012
1013#include <ansidecl.h>
1014#include <gnu-stabs.h>
1015
1016#undef  cfree
1017
1018function_alias(cfree, free, void, (ptr),
1019               DEFUN(cfree, (ptr), PTR ptr))
1020
1021#else
1022
1023void
1024cfree (ptr)
1025     __ptr_t ptr;
1026{
1027  free (ptr);
1028}
1029
1030#endif
1031/* Change the size of a block allocated by `malloc'.
1032   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1033                     Written May 1989 by Mike Haertel.
1034
1035This library is free software; you can redistribute it and/or
1036modify it under the terms of the GNU Library General Public License as
1037published by the Free Software Foundation; either version 2 of the
1038License, or (at your option) any later version.
1039
1040This library is distributed in the hope that it will be useful,
1041but WITHOUT ANY WARRANTY; without even the implied warranty of
1042MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1043Library General Public License for more details.
1044
1045You should have received a copy of the GNU Library General Public
1046License along with this library; see the file COPYING.LIB.  If
1047not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1048Cambridge, MA 02139, USA.
1049
1050   The author may be reached (Email) at the address mike@ai.mit.edu,
1051   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1052
1053#ifndef _MALLOC_INTERNAL
1054#define _MALLOC_INTERNAL
1055#include <malloc.h>
1056#endif
1057
1058#if  (defined (MEMMOVE_MISSING) || \
1059      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1060
1061/* Snarfed directly from Emacs src/dispnew.c:
1062   XXX Should use system bcopy if it handles overlap.  */
1063#ifndef emacs
1064
1065/* Like bcopy except never gets confused by overlap.  */
1066
1067static void
1068safe_bcopy (from, to, size)
1069     char *from, *to;
1070     int size;
1071{
1072  if (size <= 0 || from == to)
1073    return;
1074
1075  /* If the source and destination don't overlap, then bcopy can
1076     handle it.  If they do overlap, but the destination is lower in
1077     memory than the source, we'll assume bcopy can handle that.  */
1078  if (to < from || from + size <= to)
1079    bcopy (from, to, size);
1080
1081  /* Otherwise, we'll copy from the end.  */
1082  else
1083    {
1084      register char *endf = from + size;
1085      register char *endt = to + size;
1086
1087      /* If TO - FROM is large, then we should break the copy into
1088         nonoverlapping chunks of TO - FROM bytes each.  However, if
1089         TO - FROM is small, then the bcopy function call overhead
1090         makes this not worth it.  The crossover point could be about
1091         anywhere.  Since I don't think the obvious copy loop is too
1092         bad, I'm trying to err in its favor.  */
1093      if (to - from < 64)
1094        {
1095          do
1096            *--endt = *--endf;
1097          while (endf != from);
1098        }
1099      else
1100        {
1101          for (;;)
1102            {
1103              endt -= (to - from);
1104              endf -= (to - from);
1105
1106              if (endt < to)
1107                break;
1108
1109              bcopy (endf, endt, to - from);
1110            }
1111
1112          /* If SIZE wasn't a multiple of TO - FROM, there will be a
1113             little left over.  The amount left over is
1114             (endt + (to - from)) - to, which is endt - from.  */
1115          bcopy (from, to, endt - from);
1116        }
1117    }
1118}     
1119#endif  /* Not emacs.  */
1120
1121#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1122
1123#endif
1124
1125
1126#define min(A, B) ((A) < (B) ? (A) : (B))
1127
1128/* Debugging hook for realloc.  */
1129__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1130
1131/* Resize the given region to the new size, returning a pointer
1132   to the (possibly moved) region.  This is optimized for speed;
1133   some benchmarks seem to indicate that greater compactness is
1134   achieved by unconditionally allocating and copying to a
1135   new region.  This module has incestuous knowledge of the
1136   internals of both free and malloc. */
1137__ptr_t
1138realloc (ptr, size)
1139     __ptr_t ptr;
1140     __malloc_size_t size;
1141{
1142  __ptr_t result;
1143  int type;
1144  __malloc_size_t block, blocks, oldlimit;
1145
1146  if (size == 0)
1147    {
1148      free (ptr);
1149      return malloc (0);
1150    }
1151  else if (ptr == NULL)
1152    return malloc (size);
1153
1154  if (__realloc_hook != NULL)
1155    return (*__realloc_hook) (ptr, size);
1156
1157  block = BLOCK (ptr);
1158
1159  type = _heapinfo[block].busy.type;
1160  switch (type)
1161    {
1162    case 0:
1163      /* Maybe reallocate a large block to a small fragment.  */
1164      if (size <= BLOCKSIZE / 2)
1165        {
1166          result = malloc (size);
1167          if (result != NULL)
1168            {
1169              memcpy (result, ptr, size);
1170              _free_internal (ptr);
1171              return result;
1172            }
1173        }
1174
1175      /* The new size is a large allocation as well;
1176         see if we can hold it in place. */
1177      blocks = BLOCKIFY (size);
1178      if (blocks < _heapinfo[block].busy.info.size)
1179        {
1180          /* The new size is smaller; return
1181             excess memory to the free list. */
1182          _heapinfo[block + blocks].busy.type = 0;
1183          _heapinfo[block + blocks].busy.info.size
1184            = _heapinfo[block].busy.info.size - blocks;
1185          _heapinfo[block].busy.info.size = blocks;
1186          /* We have just created a new chunk by splitting a chunk in two.
1187             Now we will free this chunk; increment the statistics counter
1188             so it doesn't become wrong when _free_internal decrements it.  */
1189          ++_chunks_used;
1190          _free_internal (ADDRESS (block + blocks));
1191          result = ptr;
1192        }
1193      else if (blocks == _heapinfo[block].busy.info.size)
1194        /* No size change necessary.  */
1195        result = ptr;
1196      else
1197        {
1198          /* Won't fit, so allocate a new region that will.
1199             Free the old region first in case there is sufficient
1200             adjacent free space to grow without moving. */
1201          blocks = _heapinfo[block].busy.info.size;
1202          /* Prevent free from actually returning memory to the system.  */
1203          oldlimit = _heaplimit;
1204          _heaplimit = 0;
1205          _free_internal (ptr);
1206          _heaplimit = oldlimit;
1207          result = malloc (size);
1208          if (result == NULL)
1209            {
1210              /* Now we're really in trouble.  We have to unfree
1211                 the thing we just freed.  Unfortunately it might
1212                 have been coalesced with its neighbors.  */
1213              if (_heapindex == block)
1214                (void) malloc (blocks * BLOCKSIZE);
1215              else
1216                {
1217                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1218                  (void) malloc (blocks * BLOCKSIZE);
1219                  _free_internal (previous);
1220                }
1221              return NULL;
1222            }
1223          if (ptr != result)
1224            memmove (result, ptr, blocks * BLOCKSIZE);
1225        }
1226      break;
1227
1228    default:
1229      /* Old size is a fragment; type is logarithm
1230         to base two of the fragment size.  */
1231      if (size > (__malloc_size_t) (1 << (type - 1)) &&
1232          size <= (__malloc_size_t) (1 << type))
1233        /* The new size is the same kind of fragment.  */
1234        result = ptr;
1235      else
1236        {
1237          /* The new size is different; allocate a new space,
1238             and copy the lesser of the new size and the old. */
1239          result = malloc (size);
1240          if (result == NULL)
1241            return NULL;
1242          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1243          free (ptr);
1244        }
1245      break;
1246    }
1247
1248  return result;
1249}
1250/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1251
1252This library is free software; you can redistribute it and/or
1253modify it under the terms of the GNU Library General Public License as
1254published by the Free Software Foundation; either version 2 of the
1255License, or (at your option) any later version.
1256
1257This library is distributed in the hope that it will be useful,
1258but WITHOUT ANY WARRANTY; without even the implied warranty of
1259MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1260Library General Public License for more details.
1261
1262You should have received a copy of the GNU Library General Public
1263License along with this library; see the file COPYING.LIB.  If
1264not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1265Cambridge, MA 02139, USA.
1266
1267   The author may be reached (Email) at the address mike@ai.mit.edu,
1268   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1269
1270#ifndef _MALLOC_INTERNAL
1271#define _MALLOC_INTERNAL
1272#include <malloc.h>
1273#endif
1274
1275/* Allocate an array of NMEMB elements each SIZE bytes long.
1276   The entire array is initialized to zeros.  */
1277__ptr_t
1278calloc (nmemb, size)
1279     register __malloc_size_t nmemb;
1280     register __malloc_size_t size;
1281{
1282  register __ptr_t result = malloc (nmemb * size);
1283
1284  if (result != NULL)
1285    (void) memset (result, 0, nmemb * size);
1286
1287  return result;
1288}
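
#ifdef GMALLOC_EXAMPLES
/* Usage sketch for calloc above (GMALLOC_EXAMPLES is a hypothetical guard):
   the result is the same as malloc (nmemb * size) followed by memset to 0.  */
static void
example_calloc (void)
{
  int *v = (int *) calloc (10, sizeof (int));
  /* v[0] .. v[9] are all 0 here (when v != NULL).  */
  free (v);
}
#endif /* GMALLOC_EXAMPLES */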
1289/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1290This file is part of the GNU C Library.
1291
1292The GNU C Library is free software; you can redistribute it and/or modify
1293it under the terms of the GNU General Public License as published by
1294the Free Software Foundation; either version 2, or (at your option)
1295any later version.
1296
1297The GNU C Library is distributed in the hope that it will be useful,
1298but WITHOUT ANY WARRANTY; without even the implied warranty of
1299MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1300GNU General Public License for more details.
1301
1302You should have received a copy of the GNU General Public License
1303along with the GNU C Library; see the file COPYING.  If not, write to
1304the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1305
1306#ifndef _MALLOC_INTERNAL
1307#define _MALLOC_INTERNAL
1308#include <malloc.h>
1309#endif
1310
1311#ifndef __GNU_LIBRARY__
1312#define __sbrk  sbrk
1313#endif
1314
1315#ifdef __GNU_LIBRARY__
1316#ifndef __GLIBC__
1317/* It is best not to declare this and cast its result on foreign operating
1318   systems with potentially hostile include files.  */
1319extern __ptr_t __sbrk __P ((int increment));
1320#endif
1321#endif
1322
1323#ifndef NULL
1324#define NULL 0
1325#endif
1326
1327/* Allocate INCREMENT more bytes of data space,
1328   and return the start of data space, or NULL on errors.
1329   If INCREMENT is negative, shrink data space.  */
1330__ptr_t
1331__default_morecore (increment)
1332#ifdef __STDC__
1333     ptrdiff_t increment;
1334#else
1335     int increment;
1336#endif
1337{
1338  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1339  if (result == (__ptr_t) -1)
1340    return NULL;
1341  return result;
1342}
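
#ifdef GMALLOC_EXAMPLES
/* Sketch of a replacement for __morecore (GMALLOC_EXAMPLES is a hypothetical
   guard).  The contract stated in the header is sbrk-like: successive calls
   return contiguous memory, a zero increment reports the current break, and
   the value returned is the old break, i.e. the start of the newly granted
   region.  A bump pointer over a static arena satisfies that for test runs,
   provided it is installed before the first call to malloc:
       __morecore = example_morecore;  */
static char example_arena[1 << 20];
static __malloc_size_t example_used;

static __ptr_t
example_morecore (__malloc_ptrdiff_t increment)
{
  char *previous = example_arena + example_used;
  if (increment < 0
      ? (__malloc_size_t) -increment > example_used
      : example_used + increment > sizeof example_arena)
    return NULL;                /* cannot grow (or shrink) that far */
  example_used += increment;
  return previous;
}
#endif /* GMALLOC_EXAMPLES */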
1343/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1344
1345This library is free software; you can redistribute it and/or
1346modify it under the terms of the GNU Library General Public License as
1347published by the Free Software Foundation; either version 2 of the
1348License, or (at your option) any later version.
1349
1350This library is distributed in the hope that it will be useful,
1351but WITHOUT ANY WARRANTY; without even the implied warranty of
1352MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1353Library General Public License for more details.
1354
1355You should have received a copy of the GNU Library General Public
1356License along with this library; see the file COPYING.LIB.  If
1357not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1358Cambridge, MA 02139, USA.  */
1359
1360#ifndef _MALLOC_INTERNAL
1361#define _MALLOC_INTERNAL
1362#include <malloc.h>
1363#endif
1364
1365__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1366
1367__ptr_t
1368memalign (alignment, size)
1369     __malloc_size_t alignment;
1370     __malloc_size_t size;
1371{
1372  __ptr_t result;
1373  unsigned long int adj;
1374
1375  if (__memalign_hook)
1376    return (*__memalign_hook) (alignment, size);
1377
1378  size = ((size + alignment - 1) / alignment) * alignment;
1379
1380  result = malloc (size);
1381  if (result == NULL)
1382    return NULL;
1383  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1384                                                  (char *) NULL)) % alignment;
1385  if (adj != 0)
1386    {
1387      struct alignlist *l;
1388      for (l = _aligned_blocks; l != NULL; l = l->next)
1389        if (l->aligned == NULL)
1390          /* This slot is free.  Use it.  */
1391          break;
1392      if (l == NULL)
1393        {
1394          l = (struct alignlist *) malloc (sizeof (struct alignlist));
1395          if (l == NULL)
1396            {
1397              free (result);
1398              return NULL;
1399            }
1400          l->next = _aligned_blocks;
1401          _aligned_blocks = l;
1402        }
1403      l->exact = result;
1404      result = l->aligned = (char *) result + alignment - adj;
1405    }
1406
1407  return result;
1408}
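
#ifdef GMALLOC_EXAMPLES
/* Usage sketch for memalign above (GMALLOC_EXAMPLES is a hypothetical guard).
   When the pointer malloc returns is not already aligned, memalign records
   the (aligned, exact) pair in _aligned_blocks, which is how free, defined
   earlier in this file, finds the right block to release.  */
static void
example_memalign (void)
{
  char *p = (char *) memalign (256, 1000);
  /* ((unsigned long) p % 256) == 0 here (when p != NULL).  */
  free (p);
}
#endif /* GMALLOC_EXAMPLES */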
1409
1410#endif /* HAVE_GMALLOC */