source: git/omalloc/gmalloc.c @ 8d0069

Last change to this file: 8d0069, checked in by Hans Schönemann <hannes@…>, 23 years ago
*hannes: mylimits.h.in git-svn-id: file:///usr/local/Singular/svn/trunk@5332 2c84dea3-7e68-4137-9b89-c4e89433aadc
1/*******************************************************************
2 *  File:    gmalloc.c
3 *  Purpose: implementation of malloc and friends from Linux libc
4 *           version 5
5 *  Version: $Id: gmalloc.c,v 1.5 2001-03-22 22:39:07 Singular Exp $
6 *******************************************************************/
7#include "omMalloc.h"
8
9#define __USE_XOPEN
10#define __USE_XOPEN_EXTENDED
11#define _MALLOC_INTERNAL
12
13/* The malloc headers and source files from the C library follow here.  */
14
15/* Declarations for `malloc' and friends.
16   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
17                  Written May 1989 by Mike Haertel.
18
19This library is free software; you can redistribute it and/or
20modify it under the terms of the GNU Library General Public License as
21published by the Free Software Foundation; either version 2 of the
22License, or (at your option) any later version.
23
24This library is distributed in the hope that it will be useful,
25but WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
27Library General Public License for more details.
28
29You should have received a copy of the GNU Library General Public
30License along with this library; see the file COPYING.LIB.  If
31not, write to the Free Software Foundation, Inc., 675 Mass Ave,
32Cambridge, MA 02139, USA.
33
34   The author may be reached (Email) at the address mike@ai.mit.edu,
35   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
36
37#ifndef _MALLOC_H
38
39#define _MALLOC_H        1
40
41#ifdef _MALLOC_INTERNAL
42
43#if        defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
44#include <string.h>
45#else
46#ifndef memset
47#define memset(s, zero, n) bzero ((s), (n))
48#endif
49#ifndef memcpy
50#define memcpy(d, s, n)    bcopy ((s), (d), (n))
51#endif
52#endif
53
54#if defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
55#include <mylimits.h>
56#else
57#ifndef CHAR_BIT
58#define CHAR_BIT 8
59#endif
60#endif
61
62#ifdef HAVE_UNISTD_H
63#include <unistd.h>
64#endif
65
66#endif /* _MALLOC_INTERNAL.  */
67
68
69#ifdef __cplusplus
70extern "C"
71{
72#endif
73
74#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
75#undef  __P
76#define __P(args) args
77#undef  __ptr_t
78#define __ptr_t   void *
79#else /* Not C++ or ANSI C.  */
80#undef  __P
81#define __P(args) ()
82#undef  const
83#define const
84#undef  __ptr_t
85#define __ptr_t char *
86#endif /* C++ or ANSI C.  */
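/* Illustration (added for exposition, not part of the original source):
   with these macros the declaration
       extern __ptr_t malloc __P ((__malloc_size_t __size));
   reads as
       extern void *malloc (size_t __size);
   under an ANSI or C++ compiler, and as the parameterless
       extern char *malloc ();
   under a pre-ANSI compiler, so one header serves both.  */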
87
88#if defined (__STDC__) && __STDC__
89#include <stddef.h>
90#define  __malloc_size_t    size_t
91#define  __malloc_ptrdiff_t ptrdiff_t
92#else
93#define  __malloc_size_t    unsigned long
94#define  __malloc_ptrdiff_t long
95#endif
96
97#ifndef  NULL
98#define  NULL 0
99#endif
100
101
102/* Allocate SIZE bytes of memory.  */
103extern __ptr_t malloc __P ((__malloc_size_t __size));
104/* Re-allocate the previously allocated block
105   in __ptr_t, making the new block SIZE bytes long.  */
106extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
107/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
108extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
109/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
110extern void free __P ((__ptr_t __ptr));
111
112/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
113extern __ptr_t memalign __P ((__malloc_size_t __alignment,
114                              __malloc_size_t __size));
115
116/* Allocate SIZE bytes on a page boundary.  */
117extern __ptr_t valloc __P ((__malloc_size_t __size));
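/* Usage sketch (added for exposition; not part of the original library):
   the interfaces declared above, exercised together.  It assumes this file
   is linked in place of the system allocator; error handling is minimal and
   the example_usage name is made up for this sketch.  Kept under "#if 0"
   so it is never compiled here.  */
#if 0
#include <string.h>

int example_usage (void)
{
  char   *s    = (char *) malloc (32);         /* small request: served from a fragment */
  double *v    = (double *) calloc (1000, sizeof (double)); /* zero-filled array */
  void   *page = valloc (100);                 /* 100 bytes on a page boundary */

  if (s == NULL || v == NULL || page == NULL)
    return 1;

  strcpy (s, "hello");
  s = (char *) realloc (s, 4096);              /* large request: whole blocks */
  if (s == NULL)
    return 1;

  free (s);
  free (v);
  free (page);
  return 0;
}
#endif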
118
119
120#ifdef _MALLOC_INTERNAL
121
122/* The allocator divides the heap into blocks of fixed size; large
123   requests receive one or more whole blocks, and small requests
124   receive a fragment of a block.  Fragment sizes are powers of two,
125   and all fragments of a block are the same size.  When all the
126   fragments in a block have been freed, the block itself is freed.  */
127#define INT_BIT        (CHAR_BIT * sizeof(int))
128#ifdef __alpha
129#define BLOCKLOG       (13)
130#else
131#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
132#endif
133#define BLOCKSIZE      (1 << BLOCKLOG)
134#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
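/* Worked example (added for exposition): on a non-Alpha machine with 32-bit
   ints, INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096 bytes.
   BLOCKIFY rounds a byte count up to whole blocks:
   BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, BLOCKIFY (4097) == 2.
   Requests of at most BLOCKSIZE / 2 bytes are instead served from
   power-of-two fragments of a single block (see malloc below).  */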
135
136/* Determine the amount of memory spanned by the initial heap table
137   (not an absolute limit).  */
138#define HEAP           (INT_BIT > 16 ? 4194304 : 65536)
139
140/* Number of contiguous free blocks allowed to build up at the end of
141   memory before they will be returned to the system.  */
142#define FINAL_FREE_BLOCKS 8
143
144/* Data structure giving per-block information.  */
145typedef union
146  {
147    /* Heap information for a busy block.  */
148    struct
149    {
150      /* Zero for a large (multiblock) object, or positive giving the
151         logarithm to the base two of the fragment size.  */
152      int type;
153      union
154      {
155        struct
156        {
157          __malloc_size_t nfree; /* Free frags in a fragmented block.  */
158          __malloc_size_t first; /* First free fragment of the block.  */
159        } frag;
160        /* For a large object, in its first block, this has the number
161           of blocks in the object.  In the other blocks, this has a
162           negative number which says how far back the first block is.  */
163        __malloc_ptrdiff_t size;
164      } info;
165    } busy;
166    /* Heap information for a free block
167       (that may be the first of a free cluster).  */
168    struct
169    {
170      __malloc_size_t size;        /* Size (in blocks) of a free cluster.  */
171      __malloc_size_t next;        /* Index of next free cluster.  */
172      __malloc_size_t prev;        /* Index of previous free cluster.  */
173    } free;
174  } malloc_info;
175
176/* Pointer to first block of the heap.  */
177extern char *_heapbase;
178
179/* Table indexed by block number giving per-block information.  */
180extern malloc_info *_heapinfo;
181
182/* Address to block number and vice versa.  */
183#define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
184#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
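/* Worked example (added for exposition): blocks are numbered from 1, so
   BLOCK (_heapbase) == 1 and ADDRESS (1) == _heapbase.  With BLOCKSIZE
   4096, an address 5000 bytes past _heapbase lies in block
   5000 / 4096 + 1 == 2, and ADDRESS (2) == _heapbase + 4096 is the first
   byte of that block.  BLOCK maps every interior address of a block to the
   same index; ADDRESS inverts it only on block boundaries.  */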
185
186/* Current search index for the heap table.  */
187extern __malloc_size_t _heapindex;
188
189/* Limit of valid info table indices.  */
190extern __malloc_size_t _heaplimit;
191
192/* Doubly linked lists of free fragments.  */
193struct list
194{
195  struct list *next;
196  struct list *prev;
197};
198
199/* Free list headers for each fragment size.  */
200extern struct list _fraghead[];
201
202/* List of blocks allocated with `memalign' (or `valloc').  */
203struct alignlist
204{
205  struct alignlist *next;
206  __ptr_t aligned;        /* The address that memalign returned.  */
207  __ptr_t exact;          /* The address that malloc returned.  */
208};
209extern struct alignlist *_aligned_blocks;
210
211/* Instrumentation.  */
212extern __malloc_size_t _chunks_used;
213extern __malloc_size_t _bytes_used;
214extern __malloc_size_t _chunks_free;
215extern __malloc_size_t _bytes_free;
216
217/* Internal version of `free' used in `morecore' (malloc.c). */
218extern void _free_internal __P ((__ptr_t __ptr));
219
220#endif /* _MALLOC_INTERNAL.  */
221
222/* Given an address in the middle of a malloc'd object,
223   return the address of the beginning of the object.  */
224extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
225
226/* Underlying allocation function; successive calls should
227   return contiguous pieces of memory.  */
228extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
229
230/* Default value of `__morecore'.  */
231extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
232
233/* If not NULL, this function is called after each time
234   `__morecore' is called to increase the data size.  */
235extern void (*__after_morecore_hook) __P ((void));
236
237/* Nonzero if `malloc' has been called and done its initialization.  */
238extern int __malloc_initialized;
239
240/* Hooks for debugging versions.  */
241extern void (*__malloc_initialize_hook) __P ((void));
242extern void (*__free_hook) __P ((__ptr_t __ptr));
243extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
244extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
245extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
246                                        __malloc_size_t __alignment));
247
248/* Return values for `mprobe': these are the kinds of inconsistencies that
249   `mcheck' enables detection of.  */
250enum mcheck_status
251{
252  MCHECK_DISABLED = -1,  /* Consistency checking is not turned on.  */
253  MCHECK_OK,             /* Block is fine.  */
254  MCHECK_FREE,           /* Block freed twice.  */
255  MCHECK_HEAD,           /* Memory before the block was clobbered.  */
256  MCHECK_TAIL            /* Memory after the block was clobbered.  */
257};
258
259/* Activate a standard collection of debugging hooks.  This must be called
260   before `malloc' is ever called.  ABORTFUNC is called with an error code
261   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
262   null, the standard function prints on stderr and then calls `abort'.  */
263extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
264
265/* Check for aberrations in a particular malloc'd block.  You must have
266   called `mcheck' already.  These are the same checks that `mcheck' does
267   when you free or reallocate a block.  */
268extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
269
270/* Activate a standard collection of tracing hooks.  */
271extern void mtrace __P ((void));
272extern void muntrace __P ((void));
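/* Usage sketch (added for exposition; not part of the original library):
   mcheck, mprobe, mtrace and muntrace are only declared here; their
   implementations live in separate modules of GNU malloc, so this assumes
   those modules are linked in.  mcheck must run before the first
   allocation.  The example_* names are made up for this sketch, which is
   kept under "#if 0" so it is never compiled here.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void example_abortfunc (enum mcheck_status status)
{
  fprintf (stderr, "heap inconsistency detected: %d\n", (int) status);
  abort ();
}

int example_mcheck (void)
{
  char *p;

  mcheck (example_abortfunc);   /* install checking hooks first      */
  mtrace ();                    /* optionally log every allocation   */

  p = (char *) malloc (16);
  p[16] = 'x';                  /* one-byte overrun past the request */
  if (mprobe (p) == MCHECK_TAIL)
    fprintf (stderr, "overrun detected\n");

  free (p);
  muntrace ();
  return 0;
}
#endif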
273
274/* Statistics available to the user.  */
275struct mstats
276{
277  __malloc_size_t bytes_total; /* Total size of the heap. */
278  __malloc_size_t chunks_used; /* Chunks allocated by the user. */
279  __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks. */
280  __malloc_size_t chunks_free; /* Chunks in the free list. */
281  __malloc_size_t bytes_free;  /* Byte total of chunks in the free list. */
282};
283
284/* Pick up the current statistics. */
285extern struct mstats mstats __P ((void));
286
287/* Call WARNFUN with a warning message when memory usage is high.  */
288extern void memory_warnings __P ((__ptr_t __start,
289                                  void (*__warnfun) __P ((const char *))));
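/* Usage sketch (added for exposition; not part of the original library):
   how the statistics structure above might be inspected.  mstats and
   memory_warnings are declared here but implemented in separate modules of
   GNU malloc, so this assumes those modules are linked in; the example_*
   names are made up for this sketch.  Kept under "#if 0" so it is never
   compiled here.  */
#if 0
#include <stdio.h>

static void example_warnfun (const char *msg)
{
  fprintf (stderr, "memory warning: %s\n", msg);
}

void example_report_heap (void)
{
  struct mstats s = mstats ();

  fprintf (stderr,
           "heap: %lu bytes total, %lu chunks / %lu bytes in use, "
           "%lu chunks / %lu bytes free\n",
           (unsigned long) s.bytes_total,
           (unsigned long) s.chunks_used, (unsigned long) s.bytes_used,
           (unsigned long) s.chunks_free, (unsigned long) s.bytes_free);

  /* Ask for WARNFUN to be called when memory usage gets high.  */
  memory_warnings (NULL, example_warnfun);
}
#endif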
290
291
292/* Relocating allocator.  */
293
294/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
295extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
296
297/* Free the storage allocated in HANDLEPTR.  */
298extern void r_alloc_free __P ((__ptr_t *__handleptr));
299
300/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
301extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
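/* Usage sketch (added for exposition; not part of the original library):
   the relocating allocator may move a block on later calls, so it is always
   reached through the handle that r_alloc filled in.  r_alloc and friends
   live in a separate ralloc module; this assumes that module is linked in,
   and the example_relocating name is made up for the sketch.  Kept under
   "#if 0" so it is never compiled here.  */
#if 0
#include <string.h>

void example_relocating (void)
{
  __ptr_t handle = NULL;        /* r_alloc stores the block address here */

  r_alloc (&handle, 100);       /* allocate 100 relocatable bytes        */
  memset (handle, 0, 100);

  r_re_alloc (&handle, 1000);   /* may move the block; handle is updated */
  memset (handle, 0, 1000);

  r_alloc_free (&handle);       /* release the storage                   */
}
#endif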
302
303
304#ifdef __cplusplus
305}
306#endif
307
308#endif /* malloc.h  */
309/* Allocate memory on a page boundary.
310   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
311
312This library is free software; you can redistribute it and/or
313modify it under the terms of the GNU Library General Public License as
314published by the Free Software Foundation; either version 2 of the
315License, or (at your option) any later version.
316
317This library is distributed in the hope that it will be useful,
318but WITHOUT ANY WARRANTY; without even the implied warranty of
319MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
320Library General Public License for more details.
321
322You should have received a copy of the GNU Library General Public
323License along with this library; see the file COPYING.LIB.  If
324not, write to the Free Software Foundation, Inc., 675 Mass Ave,
325Cambridge, MA 02139, USA.
326
327   The author may be reached (Email) at the address mike@ai.mit.edu,
328   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
329
330#if defined (__GNU_LIBRARY__) || defined (_LIBC)
331#include <stddef.h>
332#include <sys/cdefs.h>
333/* obachman: no declaration: conflicts with gnulibc6 unistd.h */
334/* extern size_t __getpagesize __P ((void)); */
335#else
336#if 0 /* obachman: pasted in getpagesize.h manually */
337#include "getpagesize.h"
338#else
339
340#ifdef VMS
341#define getpagesize() 512
342#endif
343
344#ifdef HAVE_UNISTD_H
345#include <unistd.h>
346#endif
347
348#ifdef _SC_PAGESIZE
349#define getpagesize() sysconf(_SC_PAGESIZE)
350#else
351
352#include <sys/param.h>
353
354#ifdef EXEC_PAGESIZE
355#define getpagesize() EXEC_PAGESIZE
356#else
357#ifdef NBPG
358#define getpagesize() NBPG * CLSIZE
359#ifndef CLSIZE
360#define CLSIZE 1
361#endif /* no CLSIZE */
362#else /* no NBPG */
363#ifdef NBPC
364#define getpagesize() NBPC
365#else /* no NBPC */
366#ifdef PAGESIZE
367#define getpagesize() PAGESIZE
368#endif
369#endif /* NBPC */
370#endif /* no NBPG */
371#endif /* no EXEC_PAGESIZE */
372#endif /* no _SC_PAGESIZE */
373
374/* obachman: undef , gnulibc6 conflict with unistd.h */
375#define __getpagesize() getpagesize()
376#endif /* if 0 */
377#endif
378
379#ifndef _MALLOC_INTERNAL
380#define _MALLOC_INTERNAL
381#include <malloc.h>
382#endif
383
384static __malloc_size_t pagesize;
385
386__ptr_t
387valloc (size)
388     __malloc_size_t size;
389{
390  if (pagesize == 0)
391/* obachman: use getpagesize, instead
392    pagesize = __getpagesize ();
393*/
394    pagesize = getpagesize ();
395
396  return memalign (pagesize, size);
397}
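/* Example (added for exposition): on a system whose getpagesize () reports
   4096, the first call caches pagesize = 4096, and valloc (100) simply
   returns memalign (4096, 100): 100 usable bytes starting on a page
   boundary.  */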
398/* Memory allocator `malloc'.
399   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
400                  Written May 1989 by Mike Haertel.
401
402This library is free software; you can redistribute it and/or
403modify it under the terms of the GNU Library General Public License as
404published by the Free Software Foundation; either version 2 of the
405License, or (at your option) any later version.
406
407This library is distributed in the hope that it will be useful,
408but WITHOUT ANY WARRANTY; without even the implied warranty of
409MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
410Library General Public License for more details.
411
412You should have received a copy of the GNU Library General Public
413License along with this library; see the file COPYING.LIB.  If
414not, write to the Free Software Foundation, Inc., 675 Mass Ave,
415Cambridge, MA 02139, USA.
416
417   The author may be reached (Email) at the address mike@ai.mit.edu,
418   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
419
420#ifndef        _MALLOC_INTERNAL
421#define _MALLOC_INTERNAL
422#include <malloc.h>
423#endif
424
425/* How to really get more memory.  */
426__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
427
428/* Debugging hook for `malloc'.  */
429__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
430
431/* Pointer to the base of the first block.  */
432char *_heapbase;
433
434/* Block information table.  Allocated with align/__free (not malloc/free).  */
435malloc_info *_heapinfo;
436
437/* Number of info entries.  */
438static __malloc_size_t heapsize;
439
440/* Search index in the info table.  */
441__malloc_size_t _heapindex;
442
443/* Limit of valid info table indices.  */
444__malloc_size_t _heaplimit;
445
446/* Free lists for each fragment size.  */
447struct list _fraghead[BLOCKLOG];
448
449/* Instrumentation.  */
450__malloc_size_t _chunks_used;
451__malloc_size_t _bytes_used;
452__malloc_size_t _chunks_free;
453__malloc_size_t _bytes_free;
454
455/* Are you experienced?  */
456int __malloc_initialized;
457
458void (*__malloc_initialize_hook) __P ((void));
459void (*__after_morecore_hook) __P ((void));
460
461/* Aligned allocation.  */
462static __ptr_t align __P ((__malloc_size_t));
463static __ptr_t
464align (size)
465     __malloc_size_t size;
466{
467  __ptr_t result;
468  unsigned long int adj;
469 
470  /* 9/99 obachman@mathematik.uni-kl.de: prevent calling morecore
471     with negative arguments here */
472  if ((ptrdiff_t) size < 0) return NULL;
473 
474  result = (*__morecore) (size);
475  adj = (unsigned long int) ((unsigned long int) ((char *) result -
476                                                  (char *) NULL)) % BLOCKSIZE;
477  if (adj != 0)
478  {
479    adj = BLOCKSIZE - adj;
480    (void) (*__morecore) (adj);
481    result = (char *) result + adj;
482  }
483
484  if (__after_morecore_hook)
485    (*__after_morecore_hook) ();
486
487  return result;
488}
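/* Worked example (added for exposition): if __morecore returns core at
   address 0x804a010 and BLOCKSIZE is 4096, then adj == 0x10, so align
   requests another 4096 - 0x10 bytes and returns 0x804b000, the next
   block boundary.  */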
489
490/* Set everything up and remember that we have.  */
491static int initialize __P ((void));
492static int
493initialize ()
494{
495  if (__malloc_initialize_hook)
496    (*__malloc_initialize_hook) ();
497
498  heapsize = HEAP / BLOCKSIZE;
499  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
500  if (_heapinfo == NULL)
501    return 0;
502  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
503  _heapinfo[0].free.size = 0;
504  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
505  _heapindex = 0;
506  _heapbase = (char *) _heapinfo;
507
508  /* Account for the _heapinfo block itself in the statistics.  */
509  _bytes_used = heapsize * sizeof (malloc_info);
510  _chunks_used = 1;
511
512  __malloc_initialized = 1;
513  return 1;
514}
515
516/* Get neatly aligned memory, initializing or
517   growing the heap info table as necessary. */
518static __ptr_t morecore __P ((__malloc_size_t));
519static __ptr_t
520morecore (size)
521     __malloc_size_t size;
522{
523  __ptr_t result;
524  malloc_info *newinfo, *oldinfo;
525  __malloc_size_t newsize;
526
527  result = align (size);
528  if (result == NULL)
529    return NULL;
530
531  /* Check if we need to grow the info table.  */
532  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
533  {
534    newsize = heapsize;
535    while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
536      newsize *= 2;
537    newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
538    if (newinfo == NULL)
539    {
540      (*__morecore) (-size);
541      return NULL;
542    }
543    memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
544    memset (&newinfo[heapsize], 0,
545            (newsize - heapsize) * sizeof (malloc_info));
546    oldinfo = _heapinfo;
547    newinfo[BLOCK (oldinfo)].busy.type = 0;
548    newinfo[BLOCK (oldinfo)].busy.info.size
549      = BLOCKIFY (heapsize * sizeof (malloc_info));
550    _heapinfo = newinfo;
551    /* Account for the _heapinfo block itself in the statistics.  */
552    _bytes_used += newsize * sizeof (malloc_info);
553    ++_chunks_used;
554    _free_internal (oldinfo);
555    heapsize = newsize;
556  }
557
558  _heaplimit = BLOCK ((char *) result + size);
559  return result;
560}
561
562/* Allocate memory from the heap.  */
563__ptr_t
564malloc (size)
565     __malloc_size_t size;
566{
567  __ptr_t result;
568  __malloc_size_t block, blocks, lastblocks, start;
569  register __malloc_size_t i;
570  struct list *next;
571
572  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
573     valid address you can realloc and free (though not dereference).
574
575     It turns out that some extant code (sunrpc, at least Ultrix's version)
576     expects `malloc (0)' to return non-NULL and breaks otherwise.
577     Be compatible.  */
578
579#if        0
580  if (size == 0)
581    return NULL;
582#endif
583
584  if (__malloc_hook != NULL)
585    return (*__malloc_hook) (size);
586
587  if (!__malloc_initialized)
588    if (!initialize ())
589      return NULL;
590
591  if (size < sizeof (struct list))
592    size = sizeof (struct list);
593
594#ifdef SUNOS_LOCALTIME_BUG
595  if (size < 16)
596    size = 16;
597#endif
598
599  /* Determine the allocation policy based on the request size.  */
600  if (size <= BLOCKSIZE / 2)
601  {
602    /* Small allocation to receive a fragment of a block.
603       Determine the logarithm to base two of the fragment size. */
604    register __malloc_size_t log = 1;
605    --size;
606    while ((size /= 2) != 0)
607      ++log;
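    /* Worked example (added for exposition): for a request of 100 bytes the
       loop above leaves log = 7, i.e. the request is served from a
       1 << 7 = 128-byte fragment, the smallest power of two that holds it.  */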
608
609    /* Look in the fragment lists for a
610       free fragment of the desired size. */
611    next = _fraghead[log].next;
612    if (next != NULL)
613    {
614      /* There are free fragments of this size.
615         Pop a fragment out of the fragment list and return it.
616         Update the block's nfree and first counters. */
617      result = (__ptr_t) next;
618      next->prev->next = next->next;
619      if (next->next != NULL)
620        next->next->prev = next->prev;
621      block = BLOCK (result);
622      if (--_heapinfo[block].busy.info.frag.nfree != 0)
623        _heapinfo[block].busy.info.frag.first = (unsigned long int)
624        ((unsigned long int) ((char *) next->next - (char *) NULL)
625          % BLOCKSIZE) >> log;
626
627      /* Update the statistics.  */
628      ++_chunks_used;
629      _bytes_used += 1 << log;
630      --_chunks_free;
631      _bytes_free -= 1 << log;
632    }
633    else
634    {
635      /* No free fragments of the desired size, so get a new block
636         and break it into fragments, returning the first.  */
637      result = malloc (BLOCKSIZE);
638      if (result == NULL)
639        return NULL;
640
641      /* Link all fragments but the first into the free list.  */
642      for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
643      {
644        next = (struct list *) ((char *) result + (i << log));
645        next->next = _fraghead[log].next;
646        next->prev = &_fraghead[log];
647        next->prev->next = next;
648        if (next->next != NULL)
649          next->next->prev = next;
650      }
651
652      /* Initialize the nfree and first counters for this block.  */
653      block = BLOCK (result);
654      _heapinfo[block].busy.type = log;
655      _heapinfo[block].busy.info.frag.nfree = i - 1;
656      _heapinfo[block].busy.info.frag.first = i - 1;
657
658      _chunks_free += (BLOCKSIZE >> log) - 1;
659      _bytes_free += BLOCKSIZE - (1 << log);
660      _bytes_used -= BLOCKSIZE - (1 << log);
661    }
662  }
663  else
664  {
665    /* Large allocation to receive one or more blocks.
666       Search the free list in a circle starting at the last place visited.
667       If we loop completely around without finding a large enough
668       space we will have to get more memory from the system.  */
669    blocks = BLOCKIFY (size);
670    start = block = _heapindex;
671    while (_heapinfo[block].free.size < blocks)
672    {
673      block = _heapinfo[block].free.next;
674      if (block == start)
675      {
676        /* Need to get more from the system.  Check to see if
677           the new core will be contiguous with the final free
678           block; if so we don't need to get as much.  */
679        block = _heapinfo[0].free.prev;
680        lastblocks = _heapinfo[block].free.size;
681        if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
682          (*__morecore) (0) == ADDRESS (block + lastblocks) &&
683          (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
684        {
685          /* Which block we are extending (the `final free
686             block' referred to above) might have changed, if
687             it got combined with a freed info table.  */
688          block = _heapinfo[0].free.prev;
689          _heapinfo[block].free.size += (blocks - lastblocks);
690          _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
691          continue;
692        }
693        result = morecore (blocks * BLOCKSIZE);
694        if (result == NULL)
695          return NULL;
696        block = BLOCK (result);
697        _heapinfo[block].busy.type = 0;
698        _heapinfo[block].busy.info.size = blocks;
699        ++_chunks_used;
700        _bytes_used += blocks * BLOCKSIZE;
701        return result;
702      }
703    }
704
705    /* At this point we have found a suitable free list entry.
706       Figure out how to remove what we need from the list. */
707    result = ADDRESS (block);
708    if (_heapinfo[block].free.size > blocks)
709    {
710      /* The block we found has a bit left over,
711         so relink the tail end back into the free list. */
712      _heapinfo[block + blocks].free.size
713        = _heapinfo[block].free.size - blocks;
714      _heapinfo[block + blocks].free.next
715        = _heapinfo[block].free.next;
716      _heapinfo[block + blocks].free.prev
717        = _heapinfo[block].free.prev;
718      _heapinfo[_heapinfo[block].free.prev].free.next
719        = _heapinfo[_heapinfo[block].free.next].free.prev
720        = _heapindex = block + blocks;
721    }
722    else
723    {
724      /* The block exactly matches our requirements,
725         so just remove it from the list. */
726      _heapinfo[_heapinfo[block].free.next].free.prev
727        = _heapinfo[block].free.prev;
728      _heapinfo[_heapinfo[block].free.prev].free.next
729        = _heapindex = _heapinfo[block].free.next;
730      --_chunks_free;
731    }
732
733    _heapinfo[block].busy.type = 0;
734    _heapinfo[block].busy.info.size = blocks;
735    ++_chunks_used;
736    _bytes_used += blocks * BLOCKSIZE;
737    _bytes_free -= blocks * BLOCKSIZE;
738
739    /* Mark all the blocks of the object just allocated except for the
740       first with a negative number so you can find the first block by
741       adding that adjustment.  */
742    while (--blocks > 0)
743     _heapinfo[block + blocks].busy.info.size = -blocks;
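    /* Worked example (added for exposition): for a 3-block object starting
       at block 10, the loop above stores -1 in _heapinfo[11].busy.info.size
       and -2 in _heapinfo[12].busy.info.size, so block number + stored size
       recovers 10 from either interior block.  */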
744  }
745
746  return result;
747}
748
749#ifndef _LIBC
750
751/* On some ANSI C systems, some libc functions call _malloc, _free
752   and _realloc.  Make them use the GNU functions.  */
753
754__ptr_t
755_malloc (size)
756     __malloc_size_t size;
757{
758  return malloc (size);
759}
760
761void
762_free (ptr)
763     __ptr_t ptr;
764{
765  free (ptr);
766}
767
768__ptr_t
769_realloc (ptr, size)
770     __ptr_t ptr;
771     __malloc_size_t size;
772{
773  return realloc (ptr, size);
774}
775
776#endif
777/* Free a block of memory allocated by `malloc'.
778   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
779   Written May 1989 by Mike Haertel.
780
781This library is free software; you can redistribute it and/or
782modify it under the terms of the GNU Library General Public License as
783published by the Free Software Foundation; either version 2 of the
784License, or (at your option) any later version.
785
786This library is distributed in the hope that it will be useful,
787but WITHOUT ANY WARRANTY; without even the implied warranty of
788MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
789Library General Public License for more details.
790
791You should have received a copy of the GNU Library General Public
792License along with this library; see the file COPYING.LIB.  If
793not, write to the Free Software Foundation, Inc., 675 Mass Ave,
794Cambridge, MA 02139, USA.
795
796   The author may be reached (Email) at the address mike@ai.mit.edu,
797   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
798
799#ifndef        _MALLOC_INTERNAL
800#define _MALLOC_INTERNAL
801#include <malloc.h>
802#endif
803
804/* Debugging hook for free.  */
805void (*__free_hook) __P ((__ptr_t __ptr));
806
807/* List of blocks allocated by memalign.  */
808struct alignlist *_aligned_blocks = NULL;
809
810/* Return memory to the heap.
811   Like `free' but don't call a __free_hook if there is one.  */
812void
813_free_internal (ptr)
814     __ptr_t ptr;
815{
816  int type;
817  __malloc_size_t block, blocks;
818  register __malloc_size_t i;
819  struct list *prev, *next;
820
821  block = BLOCK (ptr);
822
823  type = _heapinfo[block].busy.type;
824  switch (type)
825  {
826    case 0:
827      /* Get as many statistics as early as we can.  */
828      --_chunks_used;
829      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
830      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
831
832      /* Find the free cluster previous to this one in the free list.
833         Start searching at the last block referenced; this may benefit
834         programs with locality of allocation.  */
835      i = _heapindex;
836      if (i > block)
837        while (i > block)
838          i = _heapinfo[i].free.prev;
839      else
840      {
841        do
842          i = _heapinfo[i].free.next;
843        while (i > 0 && i < block);
844        i = _heapinfo[i].free.prev;
845      }
846
847      /* Determine how to link this block into the free list.  */
848      if (block == i + _heapinfo[i].free.size)
849      {
850        /* Coalesce this block with its predecessor.  */
851        _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
852        block = i;
853      }
854      else
855      {
856        /* Really link this block back into the free list.  */
857        _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
858        _heapinfo[block].free.next = _heapinfo[i].free.next;
859        _heapinfo[block].free.prev = i;
860        _heapinfo[i].free.next = block;
861        _heapinfo[_heapinfo[block].free.next].free.prev = block;
862        ++_chunks_free;
863      }
864
865      /* Now that the block is linked in, see if we can coalesce it
866         with its successor (by deleting its successor from the list
867         and adding in its size).  */
868      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
869      {
870        _heapinfo[block].free.size
871          += _heapinfo[_heapinfo[block].free.next].free.size;
872        _heapinfo[block].free.next
873          = _heapinfo[_heapinfo[block].free.next].free.next;
874        _heapinfo[_heapinfo[block].free.next].free.prev = block;
875        --_chunks_free;
876      }
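      /* Example (added for exposition): if three adjacent single-block
         objects at blocks 5, 6 and 7 are freed in any order, the
         predecessor merge above and the successor merge just performed
         leave one free cluster of size 3 starting at block 5.  */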
877
878      /* Now see if we can return stuff to the system.  */
879      blocks = _heapinfo[block].free.size;
880      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
881        && (*__morecore) (0) == ADDRESS (block + blocks))
882      {
883        register __malloc_size_t bytes = blocks * BLOCKSIZE;
884        _heaplimit -= blocks;
885        (*__morecore) (-bytes);
886        _heapinfo[_heapinfo[block].free.prev].free.next
887          = _heapinfo[block].free.next;
888        _heapinfo[_heapinfo[block].free.next].free.prev
889          = _heapinfo[block].free.prev;
890        block = _heapinfo[block].free.prev;
891        --_chunks_free;
892        _bytes_free -= bytes;
893      }
894
895      /* Set the next search to begin at this block.  */
896      _heapindex = block;
897      break;
898
899    default:
900      /* Do some of the statistics.  */
901      --_chunks_used;
902      _bytes_used -= 1 << type;
903      ++_chunks_free;
904      _bytes_free += 1 << type;
905
906      /* Get the address of the first free fragment in this block.  */
907      prev = (struct list *) ((char *) ADDRESS (block) +
908                           (_heapinfo[block].busy.info.frag.first << type));
909
910      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
911      {
912        /* If all fragments of this block are free, remove them
913           from the fragment list and free the whole block.  */
914        next = prev;
915        for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
916          next = next->next;
917        prev->prev->next = next;
918        if (next != NULL)
919          next->prev = prev->prev;
920        _heapinfo[block].busy.type = 0;
921        _heapinfo[block].busy.info.size = 1;
922
923        /* Keep the statistics accurate.  */
924        ++_chunks_used;
925        _bytes_used += BLOCKSIZE;
926        _chunks_free -= BLOCKSIZE >> type;
927        _bytes_free -= BLOCKSIZE;
928
929        free (ADDRESS (block));
930      }
931      else if (_heapinfo[block].busy.info.frag.nfree != 0)
932      {
933        /* If some fragments of this block are free, link this
934           fragment into the fragment list after the first free
935           fragment of this block. */
936        next = (struct list *) ptr;
937        next->next = prev->next;
938        next->prev = prev;
939        prev->next = next;
940        if (next->next != NULL)
941          next->next->prev = next;
942        ++_heapinfo[block].busy.info.frag.nfree;
943      }
944      else
945      {
946        /* No fragments of this block are free, so link this
947           fragment into the fragment list and announce that
948           it is the first free fragment of this block. */
949        prev = (struct list *) ptr;
950        _heapinfo[block].busy.info.frag.nfree = 1;
951        _heapinfo[block].busy.info.frag.first = (unsigned long int)
952          ((unsigned long int) ((char *) ptr - (char *) NULL)
953           % BLOCKSIZE >> type);
954        prev->next = _fraghead[type].next;
955        prev->prev = &_fraghead[type];
956        prev->prev->next = prev;
957        if (prev->next != NULL)
958          prev->next->prev = prev;
959      }
960      break;
961    }
962}
963
964/* Return memory to the heap.  */
965void
966free (ptr)
967     __ptr_t ptr;
968{
969  register struct alignlist *l;
970
971  if (ptr == NULL)
972    return;
973
974  for (l = _aligned_blocks; l != NULL; l = l->next)
975    if (l->aligned == ptr)
976    {
977      l->aligned = NULL;        /* Mark the slot in the list as free.  */
978      ptr = l->exact;
979      break;
980    }
981
982  if (__free_hook != NULL)
983    (*__free_hook) (ptr);
984  else
985    _free_internal (ptr);
986}
987/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
988This file is part of the GNU C Library.
989
990The GNU C Library is free software; you can redistribute it and/or
991modify it under the terms of the GNU Library General Public License as
992published by the Free Software Foundation; either version 2 of the
993License, or (at your option) any later version.
994
995The GNU C Library is distributed in the hope that it will be useful,
996but WITHOUT ANY WARRANTY; without even the implied warranty of
997MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
998Library General Public License for more details.
999
1000You should have received a copy of the GNU Library General Public
1001License along with the GNU C Library; see the file COPYING.LIB.  If
1002not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1003Cambridge, MA 02139, USA.  */
1004
1005#ifndef        _MALLOC_INTERNAL
1006#define _MALLOC_INTERNAL
1007#include <malloc.h>
1008#endif
1009
1010#ifdef _LIBC
1011
1012#include <ansidecl.h>
1013#include <gnu-stabs.h>
1014
1015#undef cfree
1016
1017function_alias(cfree, free, void, (ptr),
1018               DEFUN(cfree, (ptr), PTR ptr))
1019
1020#else
1021
1022void
1023cfree (ptr)
1024     __ptr_t ptr;
1025{
1026  free (ptr);
1027}
1028
1029#endif
1030/* Change the size of a block allocated by `malloc'.
1031   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1032                     Written May 1989 by Mike Haertel.
1033
1034This library is free software; you can redistribute it and/or
1035modify it under the terms of the GNU Library General Public License as
1036published by the Free Software Foundation; either version 2 of the
1037License, or (at your option) any later version.
1038
1039This library is distributed in the hope that it will be useful,
1040but WITHOUT ANY WARRANTY; without even the implied warranty of
1041MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1042Library General Public License for more details.
1043
1044You should have received a copy of the GNU Library General Public
1045License along with this library; see the file COPYING.LIB.  If
1046not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1047Cambridge, MA 02139, USA.
1048
1049   The author may be reached (Email) at the address mike@ai.mit.edu,
1050   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1051
1052#ifndef        _MALLOC_INTERNAL
1053#define _MALLOC_INTERNAL
1054#include <malloc.h>
1055#endif
1056
1057#if  (defined (MEMMOVE_MISSING) || \
1058      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1059
1060/* Snarfed directly from Emacs src/dispnew.c:
1061   XXX Should use system bcopy if it handles overlap.  */
1062#ifndef emacs
1063
1064/* Like bcopy except never gets confused by overlap.  */
1065
1066static void
1067safe_bcopy (from, to, size)
1068     char *from, *to;
1069     int size;
1070{
1071  if (size <= 0 || from == to)
1072    return;
1073
1074  /* If the source and destination don't overlap, then bcopy can
1075     handle it.  If they do overlap, but the destination is lower in
1076     memory than the source, we'll assume bcopy can handle that.  */
1077  if (to < from || from + size <= to)
1078    bcopy (from, to, size);
1079
1080  /* Otherwise, we'll copy from the end.  */
1081  else
1082  {
1083    register char *endf = from + size;
1084    register char *endt = to + size;
1085
1086    /* If TO - FROM is large, then we should break the copy into
1087       nonoverlapping chunks of TO - FROM bytes each.  However, if
1088       TO - FROM is small, then the bcopy function call overhead
1089       makes this not worth it.  The crossover point could be about
1090       anywhere.  Since I don't think the obvious copy loop is too
1091       bad, I'm trying to err in its favor.  */
1092    if (to - from < 64)
1093    {
1094      do
1095        *--endt = *--endf;
1096      while (endf != from);
1097    }
1098    else
1099    {
1100      for (;;)
1101      {
1102        endt -= (to - from);
1103        endf -= (to - from);
1104
1105        if (endt < to)
1106          break;
1107
1108        bcopy (endf, endt, to - from);
1109      }
1110
1111      /* If SIZE wasn't a multiple of TO - FROM, there will be a
1112         little left over.  The amount left over is
1113         (endt + (to - from)) - to, which is endt - from.  */
1114      bcopy (from, to, endt - from);
1115    }
1116  }
1117}
1118#endif        /* Not emacs.  */
1119
1120#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1121
1122#endif
1123
1124
1125#define min(A, B) ((A) < (B) ? (A) : (B))
1126
1127/* Debugging hook for realloc.  */
1128__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1129
1130/* Resize the given region to the new size, returning a pointer
1131   to the (possibly moved) region.  This is optimized for speed;
1132   some benchmarks seem to indicate that greater compactness is
1133   achieved by unconditionally allocating and copying to a
1134   new region.  This module has incestuous knowledge of the
1135   internals of both free and malloc. */
1136__ptr_t
1137realloc (ptr, size)
1138     __ptr_t ptr;
1139     __malloc_size_t size;
1140{
1141  __ptr_t result;
1142  int type;
1143  __malloc_size_t block, blocks, oldlimit;
1144
1145  if (size == 0)
1146  {
1147    free (ptr);
1148    return malloc (0);
1149  }
1150  else if (ptr == NULL)
1151    return malloc (size);
1152
1153  if (__realloc_hook != NULL)
1154    return (*__realloc_hook) (ptr, size);
1155
1156  block = BLOCK (ptr);
1157
1158  type = _heapinfo[block].busy.type;
1159  switch (type)
1160  {
1161    case 0:
1162      /* Maybe reallocate a large block to a small fragment.  */
1163      if (size <= BLOCKSIZE / 2)
1164      {
1165        result = malloc (size);
1166        if (result != NULL)
1167        {
1168          memcpy (result, ptr, size);
1169          _free_internal (ptr);
1170          return result;
1171        }
1172      }
1173
1174      /* The new size is a large allocation as well;
1175         see if we can hold it in place. */
1176      blocks = BLOCKIFY (size);
1177      if (blocks < _heapinfo[block].busy.info.size)
1178      {
1179        /* The new size is smaller; return
1180           excess memory to the free list. */
1181        _heapinfo[block + blocks].busy.type = 0;
1182        _heapinfo[block + blocks].busy.info.size
1183          = _heapinfo[block].busy.info.size - blocks;
1184        _heapinfo[block].busy.info.size = blocks;
1185        /* We have just created a new chunk by splitting a chunk in two.
1186           Now we will free this chunk; increment the statistics counter
1187           so it doesn't become wrong when _free_internal decrements it.  */
1188        ++_chunks_used;
1189        _free_internal (ADDRESS (block + blocks));
1190        result = ptr;
1191      }
1192      else if (blocks == _heapinfo[block].busy.info.size)
1193        /* No size change necessary.  */
1194        result = ptr;
1195      else
1196      {
1197        /* Won't fit, so allocate a new region that will.
1198           Free the old region first in case there is sufficient
1199           adjacent free space to grow without moving. */
1200        blocks = _heapinfo[block].busy.info.size;
1201        /* Prevent free from actually returning memory to the system.  */
1202        oldlimit = _heaplimit;
1203        _heaplimit = 0;
1204        _free_internal (ptr);
1205        _heaplimit = oldlimit;
1206        result = malloc (size);
1207        if (result == NULL)
1208        {
1209          /* Now we're really in trouble.  We have to unfree
1210             the thing we just freed.  Unfortunately it might
1211             have been coalesced with its neighbors.  */
1212          if (_heapindex == block)
1213            (void) malloc (blocks * BLOCKSIZE);
1214          else
1215          {
1216            __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1217            (void) malloc (blocks * BLOCKSIZE);
1218            _free_internal (previous);
1219          }
1220          return NULL;
1221        }
1222        if (ptr != result)
1223          memmove (result, ptr, blocks * BLOCKSIZE);
1224      }
1225      break;
1226
1227    default:
1228      /* Old size is a fragment; type is logarithm
1229         to base two of the fragment size.  */
1230      if (size > (__malloc_size_t) (1 << (type - 1)) &&
1231          size <= (__malloc_size_t) (1 << type))
1232        /* The new size is the same kind of fragment.  */
1233        result = ptr;
1234      else
1235      {
1236        /* The new size is different; allocate a new space,
1237           and copy the lesser of the new size and the old. */
1238        result = malloc (size);
1239        if (result == NULL)
1240          return NULL;
1241        memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1242        free (ptr);
1243      }
1244      break;
1245    }
1246
1247  return result;
1248}
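/* Behaviour sketch (added for exposition; not part of the original library):
   how the realloc cases above play out, assuming BLOCKSIZE is 4096.  Error
   checking is omitted and the example_realloc_paths name is made up for
   this sketch.  Kept under "#if 0" so it is never compiled here.  */
#if 0
void example_realloc_paths (void)
{
  char *p = (char *) malloc (100);   /* 128-byte fragment (type 7)          */

  p = (char *) realloc (p, 120);     /* still fits the same fragment class:
                                        the pointer is returned unchanged   */
  p = (char *) realloc (p, 3000);    /* large request: new whole-block
                                        region, old contents copied over    */
  p = (char *) realloc (p, 8192);    /* grows from 1 block to 2; kept in
                                        place only if the adjacent blocks
                                        happen to be free                   */
  free (p);
}
#endif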
1249/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1250
1251This library is free software; you can redistribute it and/or
1252modify it under the terms of the GNU Library General Public License as
1253published by the Free Software Foundation; either version 2 of the
1254License, or (at your option) any later version.
1255
1256This library is distributed in the hope that it will be useful,
1257but WITHOUT ANY WARRANTY; without even the implied warranty of
1258MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1259Library General Public License for more details.
1260
1261You should have received a copy of the GNU Library General Public
1262License along with this library; see the file COPYING.LIB.  If
1263not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1264Cambridge, MA 02139, USA.
1265
1266   The author may be reached (Email) at the address mike@ai.mit.edu,
1267   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1268
1269#ifndef _MALLOC_INTERNAL
1270#define _MALLOC_INTERNAL
1271#include <malloc.h>
1272#endif
1273
1274/* Allocate an array of NMEMB elements each SIZE bytes long.
1275   The entire array is initialized to zeros.  */
1276__ptr_t
1277calloc (nmemb, size)
1278     register __malloc_size_t nmemb;
1279     register __malloc_size_t size;
1280{
1281  register __ptr_t result = malloc (nmemb * size);
1282
1283  if (result != NULL)
1284    (void) memset (result, 0, nmemb * size);
1285
1286  return result;
1287}
1288/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1289This file is part of the GNU C Library.
1290
1291The GNU C Library is free software; you can redistribute it and/or modify
1292it under the terms of the GNU General Public License as published by
1293the Free Software Foundation; either version 2, or (at your option)
1294any later version.
1295
1296The GNU C Library is distributed in the hope that it will be useful,
1297but WITHOUT ANY WARRANTY; without even the implied warranty of
1298MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1299GNU General Public License for more details.
1300
1301You should have received a copy of the GNU General Public License
1302along with the GNU C Library; see the file COPYING.  If not, write to
1303the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1304
1305#ifndef _MALLOC_INTERNAL
1306#define _MALLOC_INTERNAL
1307#include <malloc.h>
1308#endif
1309
1310#ifndef __GNU_LIBRARY__
1311#define __sbrk sbrk
1312#endif
1313
1314#ifdef __GNU_LIBRARY__
1315#ifndef __GLIBC__
1316/* It is best not to declare this and cast its result on foreign operating
1317   systems with potentially hostile include files.  */
1318extern __ptr_t __sbrk __P ((int increment));
1319#endif
1320#endif
1321
1322#ifndef NULL
1323#define NULL 0
1324#endif
1325
1326/* Allocate INCREMENT more bytes of data space,
1327   and return the start of data space, or NULL on errors.
1328   If INCREMENT is negative, shrink data space.  */
1329__ptr_t
1330__default_morecore (increment)
1331#ifdef __STDC__
1332     ptrdiff_t increment;
1333#else
1334     int increment;
1335#endif
1336{
1337  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1338  if (result == (__ptr_t) -1)
1339    return NULL;
1340  return result;
1341}
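/* Sketch (added for exposition; not part of the original library): because
   __morecore is an ordinary function pointer, the sbrk-based default above
   can be replaced before the first allocation.  The arena-backed version
   below is hypothetical (all example_* names are made up); like sbrk it
   hands out contiguous memory and accepts negative increments that give the
   tail back.  Kept under "#if 0" so it is never compiled here.  */
#if 0
static char example_arena[1 << 20];      /* 1 MB of backing storage */
static __malloc_size_t example_break;    /* current "break" offset  */

static __ptr_t
example_morecore (__malloc_ptrdiff_t increment)
{
  if (increment > 0
      && example_break + increment > sizeof example_arena)
    return NULL;                         /* out of arena            */
  if (increment < 0
      && (__malloc_size_t) -increment > example_break)
    return NULL;                         /* cannot shrink that far  */

  {
    __ptr_t result = example_arena + example_break;
    example_break += increment;
    return result;                       /* like sbrk: old break    */
  }
}

void example_install_morecore (void)
{
  __morecore = example_morecore;         /* must happen before malloc runs */
}
#endif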
1342/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1343
1344This library is free software; you can redistribute it and/or
1345modify it under the terms of the GNU Library General Public License as
1346published by the Free Software Foundation; either version 2 of the
1347License, or (at your option) any later version.
1348
1349This library is distributed in the hope that it will be useful,
1350but WITHOUT ANY WARRANTY; without even the implied warranty of
1351MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1352Library General Public License for more details.
1353
1354You should have received a copy of the GNU Library General Public
1355License along with this library; see the file COPYING.LIB.  If
1356not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1357Cambridge, MA 02139, USA.  */
1358
1359#ifndef _MALLOC_INTERNAL
1360#define _MALLOC_INTERNAL
1361#include <malloc.h>
1362#endif
1363
1364__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1365
1366__ptr_t
1367memalign (alignment, size)
1368     __malloc_size_t alignment;
1369     __malloc_size_t size;
1370{
1371  __ptr_t result;
1372  unsigned long int adj;
1373
1374  if (__memalign_hook)
1375    return (*__memalign_hook) (alignment, size);
1376
1377  size = ((size + alignment - 1) / alignment) * alignment;
1378
1379  result = malloc (size);
1380  if (result == NULL)
1381    return NULL;
1382  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1383                                                  (char *) NULL)) % alignment;
1384  if (adj != 0)
1385  {
1386    struct alignlist *l;
1387    for (l = _aligned_blocks; l != NULL; l = l->next)
1388      if (l->aligned == NULL)
1389        /* This slot is free.  Use it.  */
1390        break;
1391    if (l == NULL)
1392    {
1393      l = (struct alignlist *) malloc (sizeof (struct alignlist));
1394      if (l == NULL)
1395      {
1396        free (result);
1397        return NULL;
1398      }
1399      l->next = _aligned_blocks;
1400      _aligned_blocks = l;
1401    }
1402    l->exact = result;
1403    result = l->aligned = (char *) result + alignment - adj;
1404  }
1405
1406  return result;
1407}
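/* Usage sketch (added for exposition; not part of the original library):
   memalign rounds the size up to a multiple of the alignment and, when the
   returned address needs adjusting, records the (aligned, exact) pair in
   _aligned_blocks so that free can recover the pointer malloc really
   returned.  The example_memalign name is made up for this sketch, which is
   kept under "#if 0" so it is never compiled here.  */
#if 0
#include <stdio.h>

void example_memalign (void)
{
  /* 64-byte aligned buffer, e.g. for a cache-line sized structure.  */
  void *p = memalign (64, 200);

  if (p != NULL)
  {
    unsigned long adj =
      (unsigned long) ((char *) p - (char *) NULL) % 64;
    printf ("aligned address %p, remainder mod 64 = %lu\n", p, adj);
    free (p);                 /* free consults _aligned_blocks first */
  }
}
#endif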
1408