source: git/Singular/gmalloc.c @ fdc537

Last change on this file since fdc537 was acd643, checked in by Olaf Bachmann <obachman@…>, 25 years ago
* fixed gmalloc bug when allocating huge chunks of memory git-svn-id: file:///usr/local/Singular/svn/trunk@3645 2c84dea3-7e68-4137-9b89-c4e89433aadc
File size: 42.8 KB
1/****************************************
2*  Computer Algebra System SINGULAR     *
3****************************************/
4/* $Id: */
5
6/* gmalloc, used by Singular to have a trusted malloc and valloc;
7   slightly edited to include mod2.h and to provide its functionality
8   only if HAVE_GMALLOC is defined.
9*/
10
11#ifdef HAVE_CONFIG_H
12#include "mod2.h"
13#endif
14
15/* #ifdef HAVE_GMALLOC */
16#if 1
17
18#define __USE_XOPEN
19#define __USE_XOPEN_EXTENDED
20#define _MALLOC_INTERNAL
21
22/* The malloc headers and source files from the C library follow here.  */
23
24/* Declarations for `malloc' and friends.
25   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
26                  Written May 1989 by Mike Haertel.
27
28This library is free software; you can redistribute it and/or
29modify it under the terms of the GNU Library General Public License as
30published by the Free Software Foundation; either version 2 of the
31License, or (at your option) any later version.
32
33This library is distributed in the hope that it will be useful,
34but WITHOUT ANY WARRANTY; without even the implied warranty of
35MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
36Library General Public License for more details.
37
38You should have received a copy of the GNU Library General Public
39License along with this library; see the file COPYING.LIB.  If
40not, write to the Free Software Foundation, Inc., 675 Mass Ave,
41Cambridge, MA 02139, USA.
42
43   The author may be reached (Email) at the address mike@ai.mit.edu,
44   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
45
46#ifndef _MALLOC_H
47
48#define _MALLOC_H        1
49
50#ifdef _MALLOC_INTERNAL
51
52#if        defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
53#include <string.h>
54#else
55#ifndef memset
56#define memset(s, zero, n) bzero ((s), (n))
57#endif
58#ifndef memcpy
59#define memcpy(d, s, n)    bcopy ((s), (d), (n))
60#endif
61#endif
62
63#if defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
64#include <limits.h>
65#else
66#ifndef CHAR_BIT
67#define CHAR_BIT 8
68#endif
69#endif
70
71#ifdef HAVE_UNISTD_H
72#include <unistd.h>
73#endif
74
75#endif /* _MALLOC_INTERNAL.  */
76
77
78#ifdef __cplusplus
79extern "C"
80{
81#endif
82
83#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
84#undef  __P
85#define __P(args) args
86#undef  __ptr_t
87#define __ptr_t   void *
88#else /* Not C++ or ANSI C.  */
89#undef  __P
90#define __P(args) ()
91#undef  const
92#define const
93#undef  __ptr_t
94#define __ptr_t char *
95#endif /* C++ or ANSI C.  */
96
97#if defined (__STDC__) && __STDC__
98#include <stddef.h>
99#define  __malloc_size_t    size_t
100#define  __malloc_ptrdiff_t ptrdiff_t
101#else
102#define  __malloc_size_t    unsigned long
103#define  __malloc_ptrdiff_t long
104#endif
105
106#ifndef  NULL
107#define  NULL 0
108#endif
109
110
111/* Allocate SIZE bytes of memory.  */
112extern __ptr_t malloc __P ((__malloc_size_t __size));
113/* Re-allocate the previously allocated block at __PTR,
114   making the new block SIZE bytes long.  */
115extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
116/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
117extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
118/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
119extern void free __P ((__ptr_t __ptr));
120
121/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
122extern __ptr_t memalign __P ((__malloc_size_t __alignment,
123                              __malloc_size_t __size));
124
125/* Allocate SIZE bytes on a page boundary.  */
126extern __ptr_t valloc __P ((__malloc_size_t __size));
127
128
129#ifdef _MALLOC_INTERNAL
130
131/* The allocator divides the heap into blocks of fixed size; large
132   requests receive one or more whole blocks, and small requests
133   receive a fragment of a block.  Fragment sizes are powers of two,
134   and all fragments of a block are the same size.  When all the
135   fragments in a block have been freed, the block itself is freed.  */
136#define INT_BIT        (CHAR_BIT * sizeof(int))
137#ifdef __alpha
138#define BLOCKLOG       (13)
139#else
140#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
141#endif
142#define BLOCKSIZE      (1 << BLOCKLOG)
143#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
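/* Added sketch, not part of the original source: on a typical 32-bit host
   (not the Alpha special case above) INT_BIT is 32, BLOCKLOG is 12, so
   BLOCKSIZE is 4096 bytes and fragment sizes run through the powers of two
   up to BLOCKSIZE / 2.  The disabled example below shows how BLOCKIFY rounds
   request sizes up to whole blocks under that assumption.  */
#if 0
#include <stdio.h>
static void blockify_example ()
{
  /* BLOCKIFY rounds a byte count up to whole 4096-byte blocks.  */
  printf ("%lu\n", (unsigned long) BLOCKIFY (1));      /* 1 block  */
  printf ("%lu\n", (unsigned long) BLOCKIFY (4096));   /* 1 block  */
  printf ("%lu\n", (unsigned long) BLOCKIFY (10000));  /* 3 blocks */
}
#endif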
144
145/* Determine the amount of memory spanned by the initial heap table
146   (not an absolute limit).  */
147#define HEAP           (INT_BIT > 16 ? 4194304 : 65536)
148
149/* Number of contiguous free blocks allowed to build up at the end of
150   memory before they will be returned to the system.  */
151#define FINAL_FREE_BLOCKS 8
152
153/* Data structure giving per-block information.  */
154typedef union
155  {
156    /* Heap information for a busy block.  */
157    struct
158    {
159      /* Zero for a large (multiblock) object, or positive giving the
160         logarithm to the base two of the fragment size.  */
161      int type;
162      union
163      {
164        struct
165        {
166          __malloc_size_t nfree; /* Free frags in a fragmented block.  */
167          __malloc_size_t first; /* First free fragment of the block.  */
168        } frag;
169        /* For a large object, in its first block, this has the number
170           of blocks in the object.  In the other blocks, this has a
171           negative number which says how far back the first block is.  */
172        __malloc_ptrdiff_t size;
173      } info;
174    } busy;
175    /* Heap information for a free block
176       (that may be the first of a free cluster).  */
177    struct
178    {
179      __malloc_size_t size;        /* Size (in blocks) of a free cluster.  */
180      __malloc_size_t next;        /* Index of next free cluster.  */
181      __malloc_size_t prev;        /* Index of previous free cluster.  */
182    } free;
183  } malloc_info;
184
185/* Pointer to first block of the heap.  */
186extern char *_heapbase;
187
188/* Table indexed by block number giving per-block information.  */
189extern malloc_info *_heapinfo;
190
191/* Address to block number and vice versa.  */
192#define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
193#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
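/* Added sketch, not part of the original source: once the allocator has set
   _heapbase, BLOCK and ADDRESS are inverse mappings; block numbers start at
   1, not 0.  */
#if 0
#include <assert.h>
static void block_address_example ()
{
  __malloc_size_t b = BLOCK (_heapbase);        /* first heap block -> 1 */
  assert (b == 1);
  assert (ADDRESS (b) == (__ptr_t) _heapbase);  /* and back again */
}
#endif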
194
195/* Current search index for the heap table.  */
196extern __malloc_size_t _heapindex;
197
198/* Limit of valid info table indices.  */
199extern __malloc_size_t _heaplimit;
200
201/* Doubly linked lists of free fragments.  */
202struct list
203{
204  struct list *next;
205  struct list *prev;
206};
207
208/* Free list headers for each fragment size.  */
209extern struct list _fraghead[];
210
211/* List of blocks allocated with `memalign' (or `valloc').  */
212struct alignlist
213{
214  struct alignlist *next;
215  __ptr_t aligned;        /* The address that memalign returned.  */
216  __ptr_t exact;          /* The address that malloc returned.  */
217};
218extern struct alignlist *_aligned_blocks;
219
220/* Instrumentation.  */
221extern __malloc_size_t _chunks_used;
222extern __malloc_size_t _bytes_used;
223extern __malloc_size_t _chunks_free;
224extern __malloc_size_t _bytes_free;
225
226/* Internal version of `free' used in `morecore' (malloc.c). */
227extern void _free_internal __P ((__ptr_t __ptr));
228
229#endif /* _MALLOC_INTERNAL.  */
230
231/* Given an address in the middle of a malloc'd object,
232   return the address of the beginning of the object.  */
233extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
234
235/* Underlying allocation function; successive calls should
236   return contiguous pieces of memory.  */
237extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
238
239/* Default value of `__morecore'.  */
240extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
241
242/* If not NULL, this function is called after each time
243   `__morecore' is called to increase the data size.  */
244extern void (*__after_morecore_hook) __P ((void));
245
246/* Nonzero if `malloc' has been called and done its initialization.  */
247extern int __malloc_initialized;
248
249/* Hooks for debugging versions.  */
250extern void (*__malloc_initialize_hook) __P ((void));
251extern void (*__free_hook) __P ((__ptr_t __ptr));
252extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
253extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
254extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
255                                        __malloc_size_t __alignment));
256
257/* Return values for `mprobe': these are the kinds of inconsistencies that
258   `mcheck' enables detection of.  */
259enum mcheck_status
260{
261  MCHECK_DISABLED = -1,  /* Consistency checking is not turned on.  */
262  MCHECK_OK,             /* Block is fine.  */
263  MCHECK_FREE,           /* Block freed twice.  */
264  MCHECK_HEAD,           /* Memory before the block was clobbered.  */
265  MCHECK_TAIL            /* Memory after the block was clobbered.  */
266};
267
268/* Activate a standard collection of debugging hooks.  This must be called
269   before `malloc' is ever called.  ABORTFUNC is called with an error code
270   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
271   null, the standard function prints on stderr and then calls `abort'.  */
272extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
273
274/* Check for aberrations in a particular malloc'd block.  You must have
275   called `mcheck' already.  These are the same checks that `mcheck' does
276   when you free or reallocate a block.  */
277extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
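/* Added usage sketch, not part of the original source: mcheck and mprobe are
   only declared here; the sketch assumes the separate consistency-checking
   module that defines them is linked in.  */
#if 0
#include <stdlib.h>
static void report (status)
     enum mcheck_status status;
{
  abort ();                      /* called once an inconsistency is found */
}
static void mcheck_example ()
{
  char *p;
  mcheck (report);               /* must precede the very first malloc */
  p = (char *) malloc (32);
  if (mprobe (p) != MCHECK_OK)   /* same checks free would perform */
    abort ();
  free (p);
}
#endif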
278
279/* Activate a standard collection of tracing hooks.  */
280extern void mtrace __P ((void));
281extern void muntrace __P ((void));
282
283/* Statistics available to the user.  */
284struct mstats
285{
286  __malloc_size_t bytes_total; /* Total size of the heap. */
287  __malloc_size_t chunks_used; /* Chunks allocated by the user. */
288  __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks. */
289  __malloc_size_t chunks_free; /* Chunks in the free list. */
290  __malloc_size_t bytes_free;  /* Byte total of chunks in the free list. */
291};
292
293/* Pick up the current statistics. */
294extern struct mstats mstats __P ((void));
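/* Added usage sketch, not part of the original source: mstats is declared
   here but defined in a separate statistics module; the sketch assumes that
   module is linked in.  */
#if 0
#include <stdio.h>
static void mstats_example ()
{
  struct mstats s = mstats ();
  printf ("heap: %lu bytes total, %lu used in %lu chunks, %lu free in %lu chunks\n",
          (unsigned long) s.bytes_total,
          (unsigned long) s.bytes_used, (unsigned long) s.chunks_used,
          (unsigned long) s.bytes_free, (unsigned long) s.chunks_free);
}
#endif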
295
296/* Call WARNFUN with a warning message when memory usage is high.  */
297extern void memory_warnings __P ((__ptr_t __start,
298                                  void (*__warnfun) __P ((const char *))));
299
300
301/* Relocating allocator.  */
302
303/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
304extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
305
306/* Free the storage allocated in HANDLEPTR.  */
307extern void r_alloc_free __P ((__ptr_t *__handleptr));
308
309/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
310extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
311
312
313#ifdef __cplusplus
314}
315#endif
316
317#endif /* malloc.h  */
318/* Allocate memory on a page boundary.
319   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
320
321This library is free software; you can redistribute it and/or
322modify it under the terms of the GNU Library General Public License as
323published by the Free Software Foundation; either version 2 of the
324License, or (at your option) any later version.
325
326This library is distributed in the hope that it will be useful,
327but WITHOUT ANY WARRANTY; without even the implied warranty of
328MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
329Library General Public License for more details.
330
331You should have received a copy of the GNU Library General Public
332License along with this library; see the file COPYING.LIB.  If
333not, write to the Free Software Foundation, Inc., 675 Mass Ave,
334Cambridge, MA 02139, USA.
335
336   The author may be reached (Email) at the address mike@ai.mit.edu,
337   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
338
339#if defined (__GNU_LIBRARY__) || defined (_LIBC)
340#include <stddef.h>
341#include <sys/cdefs.h>
342/* obachman: no declaration: conflicts with gnulibc6 unistd.h */
343/* extern size_t __getpagesize __P ((void)); */
344#else
345#if 0 /* obachman: pasted in getpagesize.h manually */
346#include "getpagesize.h"
347#else
348
349#ifdef VMS
350#define getpagesize() 512
351#endif
352
353#ifdef HAVE_UNISTD_H
354#include <unistd.h>
355#endif
356
357#ifdef _SC_PAGESIZE
358#define getpagesize() sysconf(_SC_PAGESIZE)
359#else
360
361#include <sys/param.h>
362
363#ifdef EXEC_PAGESIZE
364#define getpagesize() EXEC_PAGESIZE
365#else
366#ifdef NBPG
367#define getpagesize() NBPG * CLSIZE
368#ifndef CLSIZE
369#define CLSIZE 1
370#endif /* no CLSIZE */
371#else /* no NBPG */
372#ifdef NBPC
373#define getpagesize() NBPC
374#else /* no NBPC */
375#ifdef PAGESIZE
376#define getpagesize() PAGESIZE
377#endif
378#endif /* NBPC */
379#endif /* no NBPG */
380#endif /* no EXEC_PAGESIZE */
381#endif /* no _SC_PAGESIZE */
382
383/* obachman: undef , gnulibc6 conflict with unistd.h */
384#define __getpagesize() getpagesize()
385#endif /* if 0 */
386#endif
387
388#ifndef _MALLOC_INTERNAL
389#define _MALLOC_INTERNAL
390#include <malloc.h>
391#endif
392
393static __malloc_size_t pagesize;
394
395__ptr_t
396valloc (size)
397     __malloc_size_t size;
398{
399  if (pagesize == 0)
400/* obachman: use getpagesize, instead
401    pagesize = __getpagesize ();
402*/
403    pagesize = getpagesize ();
404
405  return memalign (pagesize, size);
406}
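/* Added usage sketch, not part of the original source: after the first call
   the file-static `pagesize' holds the page size, and every pointer valloc
   returns is a multiple of it.  */
#if 0
#include <assert.h>
static void valloc_example ()
{
  char *buf = (char *) valloc (100);    /* rounded up and page aligned */
  assert (((unsigned long) buf % pagesize) == 0);
  free (buf);
}
#endif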
407/* Memory allocator `malloc'.
408   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
409                  Written May 1989 by Mike Haertel.
410
411This library is free software; you can redistribute it and/or
412modify it under the terms of the GNU Library General Public License as
413published by the Free Software Foundation; either version 2 of the
414License, or (at your option) any later version.
415
416This library is distributed in the hope that it will be useful,
417but WITHOUT ANY WARRANTY; without even the implied warranty of
418MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
419Library General Public License for more details.
420
421You should have received a copy of the GNU Library General Public
422License along with this library; see the file COPYING.LIB.  If
423not, write to the Free Software Foundation, Inc., 675 Mass Ave,
424Cambridge, MA 02139, USA.
425
426   The author may be reached (Email) at the address mike@ai.mit.edu,
427   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
428
429#ifndef        _MALLOC_INTERNAL
430#define _MALLOC_INTERNAL
431#include <malloc.h>
432#endif
433
434/* How to really get more memory.  */
435__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
436
437/* Debugging hook for `malloc'.  */
438__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
439
440/* Pointer to the base of the first block.  */
441char *_heapbase;
442
443/* Block information table.  Allocated with align/__free (not malloc/free).  */
444malloc_info *_heapinfo;
445
446/* Number of info entries.  */
447static __malloc_size_t heapsize;
448
449/* Search index in the info table.  */
450__malloc_size_t _heapindex;
451
452/* Limit of valid info table indices.  */
453__malloc_size_t _heaplimit;
454
455/* Free lists for each fragment size.  */
456struct list _fraghead[BLOCKLOG];
457
458/* Instrumentation.  */
459__malloc_size_t _chunks_used;
460__malloc_size_t _bytes_used;
461__malloc_size_t _chunks_free;
462__malloc_size_t _bytes_free;
463
464/* Are you experienced?  */
465int __malloc_initialized;
466
467void (*__malloc_initialize_hook) __P ((void));
468void (*__after_morecore_hook) __P ((void));
469
470/* Aligned allocation.  */
471static __ptr_t align __P ((__malloc_size_t));
472static __ptr_t
473align (size)
474     __malloc_size_t size;
475{
476  __ptr_t result;
477  unsigned long int adj;
478 
479  /* 9/99 obachman@mathematik.uni-kl.de: prevent calling morecore
480     with negative arguments here */
481  if ((ptrdiff_t) size < 0) return NULL;
482 
483  result = (*__morecore) (size);
484  adj = (unsigned long int) ((unsigned long int) ((char *) result -
485                                                  (char *) NULL)) % BLOCKSIZE;
486  if (adj != 0)
487  {
488    adj = BLOCKSIZE - adj;
489    (void) (*__morecore) (adj);
490    result = (char *) result + adj;
491  }
492
493  if (__after_morecore_hook)
494    (*__after_morecore_hook) ();
495
496  return result;
497}
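/* Added worked example, not part of the original source, assuming the usual
   BLOCKSIZE of 4096: if __morecore handed back 0x80011230, its offset within
   a block is 0x230, so align () asks for another 4096 - 0x230 = 0xdd0 bytes
   and returns the block boundary 0x80012000.  */
#if 0
#include <assert.h>
static void align_example ()
{
  unsigned long raw = 0x80011230UL;
  unsigned long adj = raw % BLOCKSIZE;                 /* 0x230 */
  assert (raw + (BLOCKSIZE - adj) == 0x80012000UL);    /* next boundary */
}
#endif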
498
499/* Set everything up and remember that we have.  */
500static int initialize __P ((void));
501static int
502initialize ()
503{
504  if (__malloc_initialize_hook)
505    (*__malloc_initialize_hook) ();
506
507  heapsize = HEAP / BLOCKSIZE;
508  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
509  if (_heapinfo == NULL)
510    return 0;
511  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
512  _heapinfo[0].free.size = 0;
513  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
514  _heapindex = 0;
515  _heapbase = (char *) _heapinfo;
516
517  /* Account for the _heapinfo block itself in the statistics.  */
518  _bytes_used = heapsize * sizeof (malloc_info);
519  _chunks_used = 1;
520
521  __malloc_initialized = 1;
522  return 1;
523}
524
525/* Get neatly aligned memory, initializing or
526   growing the heap info table as necessary. */
527static __ptr_t morecore __P ((__malloc_size_t));
528static __ptr_t
529morecore (size)
530     __malloc_size_t size;
531{
532  __ptr_t result;
533  malloc_info *newinfo, *oldinfo;
534  __malloc_size_t newsize;
535
536  result = align (size);
537  if (result == NULL)
538    return NULL;
539
540  /* Check if we need to grow the info table.  */
541  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
542  {
543    newsize = heapsize;
544    while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
545      newsize *= 2;
546    newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
547    if (newinfo == NULL)
548    {
549      (*__morecore) (-size);
550      return NULL;
551    }
552    memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
553    memset (&newinfo[heapsize], 0,
554            (newsize - heapsize) * sizeof (malloc_info));
555    oldinfo = _heapinfo;
556    newinfo[BLOCK (oldinfo)].busy.type = 0;
557    newinfo[BLOCK (oldinfo)].busy.info.size
558      = BLOCKIFY (heapsize * sizeof (malloc_info));
559    _heapinfo = newinfo;
560    /* Account for the _heapinfo block itself in the statistics.  */
561    _bytes_used += newsize * sizeof (malloc_info);
562    ++_chunks_used;
563    _free_internal (oldinfo);
564    heapsize = newsize;
565  }
566
567  _heaplimit = BLOCK ((char *) result + size);
568  return result;
569}
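/* Added sketch, not part of the original source, using the 32-bit defaults
   above: the initial table has HEAP / BLOCKSIZE = 4194304 / 4096 = 1024
   entries, i.e. it describes up to 4 MB of heap.  When a request pushes the
   heap past that, morecore () doubles newsize (2048, 4096, ...) until the
   table reaches the new heap end, copies the old table over and frees it.  */
#if 0
#include <assert.h>
static void heapsize_example ()
{
  assert (HEAP / BLOCKSIZE == 1024);   /* initial number of info entries */
}
#endif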
570
571/* Allocate memory from the heap.  */
572__ptr_t
573malloc (size)
574     __malloc_size_t size;
575{
576  __ptr_t result;
577  __malloc_size_t block, blocks, lastblocks, start;
578  register __malloc_size_t i;
579  struct list *next;
580
581  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
582     valid address you can realloc and free (though not dereference).
583
584     It turns out that some extant code (sunrpc, at least Ultrix's version)
585     expects `malloc (0)' to return non-NULL and breaks otherwise.
586     Be compatible.  */
587
588#if        0
589  if (size == 0)
590    return NULL;
591#endif
592
593  if (__malloc_hook != NULL)
594    return (*__malloc_hook) (size);
595
596  if (!__malloc_initialized)
597    if (!initialize ())
598      return NULL;
599
600  if (size < sizeof (struct list))
601    size = sizeof (struct list);
602
603#ifdef SUNOS_LOCALTIME_BUG
604  if (size < 16)
605    size = 16;
606#endif
607
608  /* Determine the allocation policy based on the request size.  */
609  if (size <= BLOCKSIZE / 2)
610  {
611    /* Small allocation to receive a fragment of a block.
612       Determine the logarithm to base two of the fragment size. */
613    register __malloc_size_t log = 1;
614    --size;
615    while ((size /= 2) != 0)
616      ++log;
617
618    /* Look in the fragment lists for a
619       free fragment of the desired size. */
620    next = _fraghead[log].next;
621    if (next != NULL)
622    {
623      /* There are free fragments of this size.
624         Pop a fragment out of the fragment list and return it.
625         Update the block's nfree and first counters. */
626      result = (__ptr_t) next;
627      next->prev->next = next->next;
628      if (next->next != NULL)
629        next->next->prev = next->prev;
630      block = BLOCK (result);
631      if (--_heapinfo[block].busy.info.frag.nfree != 0)
632        _heapinfo[block].busy.info.frag.first = (unsigned long int)
633        ((unsigned long int) ((char *) next->next - (char *) NULL)
634          % BLOCKSIZE) >> log;
635
636      /* Update the statistics.  */
637      ++_chunks_used;
638      _bytes_used += 1 << log;
639      --_chunks_free;
640      _bytes_free -= 1 << log;
641    }
642    else
643    {
644      /* No free fragments of the desired size, so get a new block
645         and break it into fragments, returning the first.  */
646      result = malloc (BLOCKSIZE);
647      if (result == NULL)
648        return NULL;
649
650      /* Link all fragments but the first into the free list.  */
651      for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
652      {
653        next = (struct list *) ((char *) result + (i << log));
654        next->next = _fraghead[log].next;
655        next->prev = &_fraghead[log];
656        next->prev->next = next;
657        if (next->next != NULL)
658          next->next->prev = next;
659      }
660
661      /* Initialize the nfree and first counters for this block.  */
662      block = BLOCK (result);
663      _heapinfo[block].busy.type = log;
664      _heapinfo[block].busy.info.frag.nfree = i - 1;
665      _heapinfo[block].busy.info.frag.first = i - 1;
666
667      _chunks_free += (BLOCKSIZE >> log) - 1;
668      _bytes_free += BLOCKSIZE - (1 << log);
669      _bytes_used -= BLOCKSIZE - (1 << log);
670    }
671  }
672  else
673  {
674    /* Large allocation to receive one or more blocks.
675       Search the free list in a circle starting at the last place visited.
676       If we loop completely around without finding a large enough
677       space we will have to get more memory from the system.  */
678    blocks = BLOCKIFY (size);
679    start = block = _heapindex;
680    while (_heapinfo[block].free.size < blocks)
681    {
682      block = _heapinfo[block].free.next;
683      if (block == start)
684      {
685        /* Need to get more from the system.  Check to see if
686           the new core will be contiguous with the final free
687           block; if so we don't need to get as much.  */
688        block = _heapinfo[0].free.prev;
689        lastblocks = _heapinfo[block].free.size;
690        if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
691          (*__morecore) (0) == ADDRESS (block + lastblocks) &&
692          (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
693        {
694          /* Which block we are extending (the `final free
695             block' referred to above) might have changed, if
696             it got combined with a freed info table.  */
697          block = _heapinfo[0].free.prev;
698          _heapinfo[block].free.size += (blocks - lastblocks);
699          _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
700          continue;
701        }
702        result = morecore (blocks * BLOCKSIZE);
703        if (result == NULL)
704          return NULL;
705        block = BLOCK (result);
706        _heapinfo[block].busy.type = 0;
707        _heapinfo[block].busy.info.size = blocks;
708        ++_chunks_used;
709        _bytes_used += blocks * BLOCKSIZE;
710        return result;
711      }
712    }
713
714    /* At this point we have found a suitable free list entry.
715       Figure out how to remove what we need from the list. */
716    result = ADDRESS (block);
717    if (_heapinfo[block].free.size > blocks)
718    {
719      /* The block we found has a bit left over,
720         so relink the tail end back into the free list. */
721      _heapinfo[block + blocks].free.size
722        = _heapinfo[block].free.size - blocks;
723      _heapinfo[block + blocks].free.next
724        = _heapinfo[block].free.next;
725      _heapinfo[block + blocks].free.prev
726        = _heapinfo[block].free.prev;
727      _heapinfo[_heapinfo[block].free.prev].free.next
728        = _heapinfo[_heapinfo[block].free.next].free.prev
729        = _heapindex = block + blocks;
730    }
731    else
732    {
733      /* The block exactly matches our requirements,
734         so just remove it from the list. */
735      _heapinfo[_heapinfo[block].free.next].free.prev
736        = _heapinfo[block].free.prev;
737      _heapinfo[_heapinfo[block].free.prev].free.next
738        = _heapindex = _heapinfo[block].free.next;
739      --_chunks_free;
740    }
741
742    _heapinfo[block].busy.type = 0;
743    _heapinfo[block].busy.info.size = blocks;
744    ++_chunks_used;
745    _bytes_used += blocks * BLOCKSIZE;
746    _bytes_free -= blocks * BLOCKSIZE;
747
748    /* Mark all the blocks of the object just allocated except for the
749       first with a negative number so you can find the first block by
750       adding that adjustment.  */
751    while (--blocks > 0)
752     _heapinfo[block + blocks].busy.info.size = -blocks;
753  }
754
755  return result;
756}
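/* Added usage sketch, not part of the original source, assuming BLOCKSIZE is
   4096: a 100-byte request is "small" (<= BLOCKSIZE / 2), the loop above
   computes log = 7, and the request is served from a 128-byte fragment; the
   first such request carves a fresh block into 32 fragments.  A 3000-byte
   request exceeds BLOCKSIZE / 2 and takes one whole block.  */
#if 0
static void malloc_policy_example ()
{
  char *small = (char *) malloc (100);    /* 128-byte fragment */
  char *large = (char *) malloc (3000);   /* one 4096-byte block */
  free (small);
  free (large);
}
#endif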
757
758#ifndef _LIBC
759
760/* On some ANSI C systems, some libc functions call _malloc, _free
761   and _realloc.  Make them use the GNU functions.  */
762
763__ptr_t
764_malloc (size)
765     __malloc_size_t size;
766{
767  return malloc (size);
768}
769
770void
771_free (ptr)
772     __ptr_t ptr;
773{
774  free (ptr);
775}
776
777__ptr_t
778_realloc (ptr, size)
779     __ptr_t ptr;
780     __malloc_size_t size;
781{
782  return realloc (ptr, size);
783}
784
785#endif
786/* Free a block of memory allocated by `malloc'.
787   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
788   Written May 1989 by Mike Haertel.
789
790This library is free software; you can redistribute it and/or
791modify it under the terms of the GNU Library General Public License as
792published by the Free Software Foundation; either version 2 of the
793License, or (at your option) any later version.
794
795This library is distributed in the hope that it will be useful,
796but WITHOUT ANY WARRANTY; without even the implied warranty of
797MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
798Library General Public License for more details.
799
800You should have received a copy of the GNU Library General Public
801License along with this library; see the file COPYING.LIB.  If
802not, write to the Free Software Foundation, Inc., 675 Mass Ave,
803Cambridge, MA 02139, USA.
804
805   The author may be reached (Email) at the address mike@ai.mit.edu,
806   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
807
808#ifndef        _MALLOC_INTERNAL
809#define _MALLOC_INTERNAL
810#include <malloc.h>
811#endif
812
813/* Debugging hook for free.  */
814void (*__free_hook) __P ((__ptr_t __ptr));
815
816/* List of blocks allocated by memalign.  */
817struct alignlist *_aligned_blocks = NULL;
818
819/* Return memory to the heap.
820   Like `free' but don't call a __free_hook if there is one.  */
821void
822_free_internal (ptr)
823     __ptr_t ptr;
824{
825  int type;
826  __malloc_size_t block, blocks;
827  register __malloc_size_t i;
828  struct list *prev, *next;
829
830  block = BLOCK (ptr);
831
832  type = _heapinfo[block].busy.type;
833  switch (type)
834  {
835    case 0:
836      /* Get as many statistics as early as we can.  */
837      --_chunks_used;
838      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
839      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
840
841      /* Find the free cluster previous to this one in the free list.
842         Start searching at the last block referenced; this may benefit
843         programs with locality of allocation.  */
844      i = _heapindex;
845      if (i > block)
846        while (i > block)
847          i = _heapinfo[i].free.prev;
848      else
849      {
850        do
851          i = _heapinfo[i].free.next;
852        while (i > 0 && i < block);
853        i = _heapinfo[i].free.prev;
854      }
855
856      /* Determine how to link this block into the free list.  */
857      if (block == i + _heapinfo[i].free.size)
858      {
859        /* Coalesce this block with its predecessor.  */
860        _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
861        block = i;
862      }
863      else
864      {
865        /* Really link this block back into the free list.  */
866        _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
867        _heapinfo[block].free.next = _heapinfo[i].free.next;
868        _heapinfo[block].free.prev = i;
869        _heapinfo[i].free.next = block;
870        _heapinfo[_heapinfo[block].free.next].free.prev = block;
871        ++_chunks_free;
872      }
873
874      /* Now that the block is linked in, see if we can coalesce it
875         with its successor (by deleting its successor from the list
876         and adding in its size).  */
877      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
878      {
879        _heapinfo[block].free.size
880          += _heapinfo[_heapinfo[block].free.next].free.size;
881        _heapinfo[block].free.next
882          = _heapinfo[_heapinfo[block].free.next].free.next;
883        _heapinfo[_heapinfo[block].free.next].free.prev = block;
884        --_chunks_free;
885      }
886
887      /* Now see if we can return stuff to the system.  */
888      blocks = _heapinfo[block].free.size;
889      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
890        && (*__morecore) (0) == ADDRESS (block + blocks))
891      {
892        register __malloc_size_t bytes = blocks * BLOCKSIZE;
893        _heaplimit -= blocks;
894        (*__morecore) (-bytes);
895        _heapinfo[_heapinfo[block].free.prev].free.next
896          = _heapinfo[block].free.next;
897        _heapinfo[_heapinfo[block].free.next].free.prev
898          = _heapinfo[block].free.prev;
899        block = _heapinfo[block].free.prev;
900        --_chunks_free;
901        _bytes_free -= bytes;
902      }
903
904      /* Set the next search to begin at this block.  */
905      _heapindex = block;
906      break;
907
908    default:
909      /* Do some of the statistics.  */
910      --_chunks_used;
911      _bytes_used -= 1 << type;
912      ++_chunks_free;
913      _bytes_free += 1 << type;
914
915      /* Get the address of the first free fragment in this block.  */
916      prev = (struct list *) ((char *) ADDRESS (block) +
917                           (_heapinfo[block].busy.info.frag.first << type));
918
919      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
920      {
921        /* If all fragments of this block are free, remove them
922           from the fragment list and free the whole block.  */
923        next = prev;
924        for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
925          next = next->next;
926        prev->prev->next = next;
927        if (next != NULL)
928          next->prev = prev->prev;
929        _heapinfo[block].busy.type = 0;
930        _heapinfo[block].busy.info.size = 1;
931
932        /* Keep the statistics accurate.  */
933        ++_chunks_used;
934        _bytes_used += BLOCKSIZE;
935        _chunks_free -= BLOCKSIZE >> type;
936        _bytes_free -= BLOCKSIZE;
937
938        free (ADDRESS (block));
939      }
940      else if (_heapinfo[block].busy.info.frag.nfree != 0)
941      {
942        /* If some fragments of this block are free, link this
943           fragment into the fragment list after the first free
944           fragment of this block. */
945        next = (struct list *) ptr;
946        next->next = prev->next;
947        next->prev = prev;
948        prev->next = next;
949        if (next->next != NULL)
950          next->next->prev = next;
951        ++_heapinfo[block].busy.info.frag.nfree;
952      }
953      else
954      {
955        /* No fragments of this block are free, so link this
956           fragment into the fragment list and announce that
957           it is the first free fragment of this block. */
958        prev = (struct list *) ptr;
959        _heapinfo[block].busy.info.frag.nfree = 1;
960        _heapinfo[block].busy.info.frag.first = (unsigned long int)
961          ((unsigned long int) ((char *) ptr - (char *) NULL)
962           % BLOCKSIZE >> type);
963        prev->next = _fraghead[type].next;
964        prev->prev = &_fraghead[type];
965        prev->prev->next = prev;
966        if (prev->next != NULL)
967          prev->next->prev = prev;
968      }
969      break;
970    }
971}
972
973/* Return memory to the heap.  */
974void
975free (ptr)
976     __ptr_t ptr;
977{
978  register struct alignlist *l;
979
980  if (ptr == NULL)
981    return;
982
983  for (l = _aligned_blocks; l != NULL; l = l->next)
984    if (l->aligned == ptr)
985    {
986      l->aligned = NULL;        /* Mark the slot in the list as free.  */
987      ptr = l->exact;
988      break;
989    }
990
991  if (__free_hook != NULL)
992    (*__free_hook) (ptr);
993  else
994    _free_internal (ptr);
995}
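/* Added usage sketch, not part of the original source: free () first checks
   whether the pointer came from memalign or valloc and, if so, translates it
   back to the exact address malloc returned; free (NULL) is a quiet no-op.  */
#if 0
static void free_example ()
{
  char *p = (char *) memalign (256, 100);
  free (p);        /* found in _aligned_blocks, released at l->exact */
  free (NULL);     /* accepted and ignored */
}
#endif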
996/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
997This file is part of the GNU C Library.
998
999The GNU C Library is free software; you can redistribute it and/or
1000modify it under the terms of the GNU Library General Public License as
1001published by the Free Software Foundation; either version 2 of the
1002License, or (at your option) any later version.
1003
1004The GNU C Library is distributed in the hope that it will be useful,
1005but WITHOUT ANY WARRANTY; without even the implied warranty of
1006MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1007Library General Public License for more details.
1008
1009You should have received a copy of the GNU Library General Public
1010License along with the GNU C Library; see the file COPYING.LIB.  If
1011not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1012Cambridge, MA 02139, USA.  */
1013
1014#ifndef        _MALLOC_INTERNAL
1015#define _MALLOC_INTERNAL
1016#include <malloc.h>
1017#endif
1018
1019#ifdef _LIBC
1020
1021#include <ansidecl.h>
1022#include <gnu-stabs.h>
1023
1024#undef cfree
1025
1026function_alias(cfree, free, void, (ptr),
1027               DEFUN(cfree, (ptr), PTR ptr))
1028
1029#else
1030
1031void
1032cfree (ptr)
1033     __ptr_t ptr;
1034{
1035  free (ptr);
1036}
1037
1038#endif
1039/* Change the size of a block allocated by `malloc'.
1040   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1041                     Written May 1989 by Mike Haertel.
1042
1043This library is free software; you can redistribute it and/or
1044modify it under the terms of the GNU Library General Public License as
1045published by the Free Software Foundation; either version 2 of the
1046License, or (at your option) any later version.
1047
1048This library is distributed in the hope that it will be useful,
1049but WITHOUT ANY WARRANTY; without even the implied warranty of
1050MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1051Library General Public License for more details.
1052
1053You should have received a copy of the GNU Library General Public
1054License along with this library; see the file COPYING.LIB.  If
1055not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1056Cambridge, MA 02139, USA.
1057
1058   The author may be reached (Email) at the address mike@ai.mit.edu,
1059   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1060
1061#ifndef        _MALLOC_INTERNAL
1062#define _MALLOC_INTERNAL
1063#include <malloc.h>
1064#endif
1065
1066#if  (defined (MEMMOVE_MISSING) || \
1067      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1068
1069/* Snarfed directly from Emacs src/dispnew.c:
1070   XXX Should use system bcopy if it handles overlap.  */
1071#ifndef emacs
1072
1073/* Like bcopy except never gets confused by overlap.  */
1074
1075static void
1076safe_bcopy (from, to, size)
1077     char *from, *to;
1078     int size;
1079{
1080  if (size <= 0 || from == to)
1081    return;
1082
1083  /* If the source and destination don't overlap, then bcopy can
1084     handle it.  If they do overlap, but the destination is lower in
1085     memory than the source, we'll assume bcopy can handle that.  */
1086  if (to < from || from + size <= to)
1087    bcopy (from, to, size);
1088
1089  /* Otherwise, we'll copy from the end.  */
1090  else
1091  {
1092    register char *endf = from + size;
1093    register char *endt = to + size;
1094
1095    /* If TO - FROM is large, then we should break the copy into
1096       nonoverlapping chunks of TO - FROM bytes each.  However, if
1097       TO - FROM is small, then the bcopy function call overhead
1098       makes this not worth it.  The crossover point could be about
1099       anywhere.  Since I don't think the obvious copy loop is too
1100       bad, I'm trying to err in its favor.  */
1101    if (to - from < 64)
1102    {
1103      do
1104        *--endt = *--endf;
1105      while (endf != from);
1106    }
1107    else
1108    {
1109      for (;;)
1110      {
1111        endt -= (to - from);
1112        endf -= (to - from);
1113
1114        if (endt < to)
1115          break;
1116
1117        bcopy (endf, endt, to - from);
1118      }
1119
1120      /* If SIZE wasn't a multiple of TO - FROM, there will be a
1121         little left over.  The amount left over is
1122         (endt + (to - from)) - to, which is endt - from.  */
1123      bcopy (from, to, endt - from);
1124    }
1125  }
1126}
1127#endif        /* Not emacs.  */
1128
1129#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1130
1131#endif
1132
1133
1134#define min(A, B) ((A) < (B) ? (A) : (B))
1135
1136/* Debugging hook for realloc.  */
1137__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1138
1139/* Resize the given region to the new size, returning a pointer
1140   to the (possibly moved) region.  This is optimized for speed;
1141   some benchmarks seem to indicate that greater compactness is
1142   achieved by unconditionally allocating and copying to a
1143   new region.  This module has incestuous knowledge of the
1144   internals of both free and malloc. */
1145__ptr_t
1146realloc (ptr, size)
1147     __ptr_t ptr;
1148     __malloc_size_t size;
1149{
1150  __ptr_t result;
1151  int type;
1152  __malloc_size_t block, blocks, oldlimit;
1153
1154  if (size == 0)
1155  {
1156    free (ptr);
1157    return malloc (0);
1158  }
1159  else if (ptr == NULL)
1160    return malloc (size);
1161
1162  if (__realloc_hook != NULL)
1163    return (*__realloc_hook) (ptr, size);
1164
1165  block = BLOCK (ptr);
1166
1167  type = _heapinfo[block].busy.type;
1168  switch (type)
1169  {
1170    case 0:
1171      /* Maybe reallocate a large block to a small fragment.  */
1172      if (size <= BLOCKSIZE / 2)
1173      {
1174        result = malloc (size);
1175        if (result != NULL)
1176        {
1177          memcpy (result, ptr, size);
1178          _free_internal (ptr);
1179          return result;
1180        }
1181      }
1182
1183      /* The new size is a large allocation as well;
1184         see if we can hold it in place. */
1185      blocks = BLOCKIFY (size);
1186      if (blocks < _heapinfo[block].busy.info.size)
1187      {
1188        /* The new size is smaller; return
1189           excess memory to the free list. */
1190        _heapinfo[block + blocks].busy.type = 0;
1191        _heapinfo[block + blocks].busy.info.size
1192          = _heapinfo[block].busy.info.size - blocks;
1193        _heapinfo[block].busy.info.size = blocks;
1194        /* We have just created a new chunk by splitting a chunk in two.
1195           Now we will free this chunk; increment the statistics counter
1196           so it doesn't become wrong when _free_internal decrements it.  */
1197        ++_chunks_used;
1198        _free_internal (ADDRESS (block + blocks));
1199        result = ptr;
1200      }
1201      else if (blocks == _heapinfo[block].busy.info.size)
1202        /* No size change necessary.  */
1203        result = ptr;
1204      else
1205      {
1206        /* Won't fit, so allocate a new region that will.
1207           Free the old region first in case there is sufficient
1208           adjacent free space to grow without moving. */
1209        blocks = _heapinfo[block].busy.info.size;
1210        /* Prevent free from actually returning memory to the system.  */
1211        oldlimit = _heaplimit;
1212        _heaplimit = 0;
1213        _free_internal (ptr);
1214        _heaplimit = oldlimit;
1215        result = malloc (size);
1216        if (result == NULL)
1217        {
1218          /* Now we're really in trouble.  We have to unfree
1219             the thing we just freed.  Unfortunately it might
1220             have been coalesced with its neighbors.  */
1221          if (_heapindex == block)
1222            (void) malloc (blocks * BLOCKSIZE);
1223          else
1224          {
1225            __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1226            (void) malloc (blocks * BLOCKSIZE);
1227            _free_internal (previous);
1228          }
1229          return NULL;
1230        }
1231        if (ptr != result)
1232          memmove (result, ptr, blocks * BLOCKSIZE);
1233      }
1234      break;
1235
1236    default:
1237      /* Old size is a fragment; type is logarithm
1238         to base two of the fragment size.  */
1239      if (size > (__malloc_size_t) (1 << (type - 1)) &&
1240          size <= (__malloc_size_t) (1 << type))
1241        /* The new size is the same kind of fragment.  */
1242        result = ptr;
1243      else
1244      {
1245        /* The new size is different; allocate a new space,
1246           and copy the lesser of the new size and the old. */
1247        result = malloc (size);
1248        if (result == NULL)
1249          return NULL;
1250        memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1251        free (ptr);
1252      }
1253      break;
1254    }
1255
1256  return result;
1257}
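/* Added usage sketch, not part of the original source: realloc (NULL, n)
   behaves like malloc (n), realloc (p, 0) frees p and returns malloc (0),
   and when a block has to move its contents survive up to the lesser of the
   old and new sizes.  */
#if 0
#include <string.h>
static void realloc_example ()
{
  char *p = (char *) realloc (NULL, 64);   /* same as malloc (64) */
  if (p == NULL)
    return;
  strcpy (p, "gmalloc");
  p = (char *) realloc (p, 8192);          /* may move; contents survive */
  p = (char *) realloc (p, 0);             /* frees the old block */
  free (p);                                /* p is now a malloc (0) result */
}
#endif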
1258/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1259
1260This library is free software; you can redistribute it and/or
1261modify it under the terms of the GNU Library General Public License as
1262published by the Free Software Foundation; either version 2 of the
1263License, or (at your option) any later version.
1264
1265This library is distributed in the hope that it will be useful,
1266but WITHOUT ANY WARRANTY; without even the implied warranty of
1267MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1268Library General Public License for more details.
1269
1270You should have received a copy of the GNU Library General Public
1271License along with this library; see the file COPYING.LIB.  If
1272not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1273Cambridge, MA 02139, USA.
1274
1275   The author may be reached (Email) at the address mike@ai.mit.edu,
1276   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1277
1278#ifndef _MALLOC_INTERNAL
1279#define _MALLOC_INTERNAL
1280#include <malloc.h>
1281#endif
1282
1283/* Allocate an array of NMEMB elements each SIZE bytes long.
1284   The entire array is initialized to zeros.  */
1285__ptr_t
1286calloc (nmemb, size)
1287     register __malloc_size_t nmemb;
1288     register __malloc_size_t size;
1289{
1290  register __ptr_t result = malloc (nmemb * size);
1291
1292  if (result != NULL)
1293    (void) memset (result, 0, nmemb * size);
1294
1295  return result;
1296}
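/* Added note and hypothetical wrapper, not part of the original source: this
   calloc multiplies nmemb * size without an overflow check, so a caller that
   cannot trust its inputs may want to guard the product itself.  */
#if 0
static __ptr_t checked_calloc (nmemb, size)
     __malloc_size_t nmemb;
     __malloc_size_t size;
{
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;                 /* product would overflow */
  return calloc (nmemb, size);
}
#endif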
1297/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1298This file is part of the GNU C Library.
1299
1300The GNU C Library is free software; you can redistribute it and/or modify
1301it under the terms of the GNU General Public License as published by
1302the Free Software Foundation; either version 2, or (at your option)
1303any later version.
1304
1305The GNU C Library is distributed in the hope that it will be useful,
1306but WITHOUT ANY WARRANTY; without even the implied warranty of
1307MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1308GNU General Public License for more details.
1309
1310You should have received a copy of the GNU General Public License
1311along with the GNU C Library; see the file COPYING.  If not, write to
1312the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1313
1314#ifndef _MALLOC_INTERNAL
1315#define _MALLOC_INTERNAL
1316#include <malloc.h>
1317#endif
1318
1319#ifndef __GNU_LIBRARY__
1320#define __sbrk sbrk
1321#endif
1322
1323#ifdef __GNU_LIBRARY__
1324#ifndef __GLIBC__
1325/* It is best not to declare this and cast its result on foreign operating
1326   systems with potentially hostile include files.  */
1327extern __ptr_t __sbrk __P ((int increment));
1328#endif
1329#endif
1330
1331#ifndef NULL
1332#define NULL 0
1333#endif
1334
1335/* Allocate INCREMENT more bytes of data space,
1336   and return the start of data space, or NULL on errors.
1337   If INCREMENT is negative, shrink data space.  */
1338__ptr_t
1339__default_morecore (increment)
1340#ifdef __STDC__
1341     ptrdiff_t increment;
1342#else
1343     int increment;
1344#endif
1345{
1346  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1347  if (result == (__ptr_t) -1)
1348    return NULL;
1349  return result;
1350}
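/* Added sketch, not part of the original source: __morecore is the hook the
   allocator uses for raw memory.  A replacement must return contiguous
   regions across successive calls, so a real one manages its own growing
   arena; this sketch only shows the hook being installed.  */
#if 0
static __ptr_t my_morecore (increment)
     ptrdiff_t increment;
{
  return __default_morecore (increment);   /* delegate to sbrk for now */
}
static void install_my_morecore ()
{
  __morecore = my_morecore;    /* must happen before the first malloc */
}
#endif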
1351/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1352
1353This library is free software; you can redistribute it and/or
1354modify it under the terms of the GNU Library General Public License as
1355published by the Free Software Foundation; either version 2 of the
1356License, or (at your option) any later version.
1357
1358This library is distributed in the hope that it will be useful,
1359but WITHOUT ANY WARRANTY; without even the implied warranty of
1360MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1361Library General Public License for more details.
1362
1363You should have received a copy of the GNU Library General Public
1364License along with this library; see the file COPYING.LIB.  If
1365not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1366Cambridge, MA 02139, USA.  */
1367
1368#ifndef _MALLOC_INTERNAL
1369#define _MALLOC_INTERNAL
1370#include <malloc.h>
1371#endif
1372
1373__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1374
1375__ptr_t
1376memalign (alignment, size)
1377     __malloc_size_t alignment;
1378     __malloc_size_t size;
1379{
1380  __ptr_t result;
1381  unsigned long int adj;
1382
1383  if (__memalign_hook)
1384    return (*__memalign_hook) (alignment, size);
1385
1386  size = ((size + alignment - 1) / alignment) * alignment;
1387
1388  result = malloc (size);
1389  if (result == NULL)
1390    return NULL;
1391  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1392                                                  (char *) NULL)) % alignment;
1393  if (adj != 0)
1394  {
1395    struct alignlist *l;
1396    for (l = _aligned_blocks; l != NULL; l = l->next)
1397      if (l->aligned == NULL)
1398        /* This slot is free.  Use it.  */
1399        break;
1400    if (l == NULL)
1401    {
1402      l = (struct alignlist *) malloc (sizeof (struct alignlist));
1403      if (l == NULL)
1404      {
1405        free (result);
1406        return NULL;
1407      }
1408      l->next = _aligned_blocks;
1409      _aligned_blocks = l;
1410    }
1411    l->exact = result;
1412    result = l->aligned = (char *) result + alignment - adj;
1413  }
1414
1415  return result;
1416}
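/* Added usage sketch, not part of the original source: a 100-byte request
   aligned to 64 bytes is rounded up to 128 bytes, and if malloc's result is
   misaligned it is slid forward to the next 64-byte boundary, with the
   original pointer remembered in _aligned_blocks so free () can undo the
   shift.  */
#if 0
#include <assert.h>
static void memalign_example ()
{
  char *p = (char *) memalign (64, 100);
  assert (((unsigned long) p % 64) == 0);
  free (p);
}
#endif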
1417#endif /* HAVE_GMALLOC */