Changeset 472f39 in git for Singular/gmalloc.c


Timestamp:
Jul 6, 1999, 3:35:34 PM
Author:
Hans Schönemann <hannes@…>
Branches:
spielwiese (fe61d9c35bf7c61f2b6cbf1b56e25e2f08d536cc)
Children:
ce7ba606241efb95de4d1ab5581428b7143b3be2
Parents:
acfbb5a85f3eb92ffdb442a04348bd0186e1785b
Message:
* hannes: gmalloc.c: 64-bit cleanups (part 1)
          ipid.h, subexpr.h: alignment changes
          ipid.cc: new function: idrec::String (for idhdl)
          silink.cc: slReadAscii2 introduced,
          silink.cc: String-fixes for idhdl


git-svn-id: file:///usr/local/Singular/svn/trunk@3235 2c84dea3-7e68-4137-9b89-c4e89433aadc
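
The substantive part of the "64-bit cleanups" in the diff below is the change of
the fallback typedefs __malloc_size_t/__malloc_ptrdiff_t from unsigned int/int
to unsigned long/long, plus an Alpha-specific BLOCKLOG of 13; most other hunks
are whitespace and brace-style reindentation. A minimal sketch of why the wider
fallback type matters on an LP64 target such as the DEC Alpha -- the two
addresses below are invented for illustration and do not come from the
changeset:

    #include <stdio.h>

    int main (void)
    {
      /* Two hypothetical heap addresses more than 4 GB apart.  */
      unsigned long lo = 0x120000000UL;  /* e.g. _heapbase            */
      unsigned long hi = 0x2a0000000UL;  /* e.g. the current sbrk end */

      unsigned int  size32 = (unsigned int)  (hi - lo);  /* old typedef */
      unsigned long size64 = (unsigned long) (hi - lo);  /* new typedef */

      /* The 32-bit type silently truncates the 6 GB span.  */
      printf ("unsigned int : %u\n",  size32);  /* 2147483648 (wrong)   */
      printf ("unsigned long: %lu\n", size64);  /* 6442450944 (correct) */
      return 0;
    }

Sizes and pointer differences held in a 32-bit type wrap once the heap spans
more than 4 GB of address space, so any block bookkeeping built on them
(BLOCK, BLOCKIFY, the info-table indices) would corrupt silently.
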
File:
1 edited

  • Singular/gmalloc.c

--- Singular/gmalloc.c	(racfbb5a)
+++ Singular/gmalloc.c	(r472f39)
@@ -4,8 +4,8 @@
 /* $Id: */

-/* gmalloc used by Singular to have a trusted malloc and valloc 
+/* gmalloc used by Singular to have a trusted malloc and valloc
    slightly edited to include mod2.h and to only provide its functionality
    if HAVE_GMALLOC is defined
-*/   
+*/

 #ifdef HAVE_CONFIG_H
@@ -24,5 +24,5 @@
 /* Declarations for `malloc' and friends.
    Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
-                  Written May 1989 by Mike Haertel.
+                  Written May 1989 by Mike Haertel.

 This library is free software; you can redistribute it and/or
@@ -46,35 +46,35 @@
 #ifndef _MALLOC_H

-#define _MALLOC_H       1
+#define _MALLOC_H        1

 #ifdef _MALLOC_INTERNAL

-#if     defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
+#if        defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
 #include <string.h>
 #else
 #ifndef memset
-#define memset(s, zero, n)      bzero ((s), (n))
+#define memset(s, zero, n) bzero ((s), (n))
 #endif
 #ifndef memcpy
-#define memcpy(d, s, n)         bcopy ((s), (d), (n))
-#endif
-#endif
-
-#if     defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
+#define memcpy(d, s, n)    bcopy ((s), (d), (n))
+#endif
+#endif
+
+#if defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
 #include <limits.h>
 #else
 #ifndef CHAR_BIT
-#define CHAR_BIT        8
-#endif
-#endif
-
-#ifdef  HAVE_UNISTD_H
+#define CHAR_BIT 8
+#endif
+#endif
+
+#ifdef HAVE_UNISTD_H
 #include <unistd.h>
 #endif

-#endif  /* _MALLOC_INTERNAL.  */
-
-
-#ifdef  __cplusplus
+#endif /* _MALLOC_INTERNAL.  */
+
+
+#ifdef __cplusplus
 extern "C"
 {
@@ -82,28 +82,28 @@

 #if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
-#undef  __P
-#define __P(args)       args
-#undef  __ptr_t
-#define __ptr_t         void *
+#undef  __P
+#define __P(args) args
+#undef  __ptr_t
+#define __ptr_t   void *
 #else /* Not C++ or ANSI C.  */
-#undef  __P
-#define __P(args)       ()
-#undef  const
-#define const
-#undef  __ptr_t
-#define __ptr_t         char *
+#undef  __P
+#define __P(args) ()
+#undef  const
+#define const
+#undef  __ptr_t
+#define __ptr_t char *
 #endif /* C++ or ANSI C.  */

 #if defined (__STDC__) && __STDC__
 #include <stddef.h>
-#define __malloc_size_t         size_t
-#define __malloc_ptrdiff_t      ptrdiff_t
+#define  __malloc_size_t    size_t
+#define  __malloc_ptrdiff_t ptrdiff_t
 #else
-#define __malloc_size_t         unsigned int
-#define __malloc_ptrdiff_t      int
-#endif
-
-#ifndef NULL
-#define NULL    0
+#define  __malloc_size_t    unsigned long
+#define  __malloc_ptrdiff_t long
+#endif
+
+#ifndef  NULL
+#define  NULL 0
 #endif

@@ -121,5 +121,5 @@
 /* Allocate SIZE bytes allocated to ALIGNMENT bytes.  */
 extern __ptr_t memalign __P ((__malloc_size_t __alignment,
-                              __malloc_size_t __size));
+                              __malloc_size_t __size));

 /* Allocate SIZE bytes on a page boundary.  */
@@ -134,16 +134,20 @@
    and all the fragments of a block are the same size.  When all the
    fragments in a block have been freed, the block itself is freed.  */
-#define INT_BIT         (CHAR_BIT * sizeof(int))
-#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
-#define BLOCKSIZE       (1 << BLOCKLOG)
-#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
+#define INT_BIT        (CHAR_BIT * sizeof(int))
+#ifdef __alpha
+#define BLOCKLOG       (13)
+#else
+#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
+#endif
+#define BLOCKSIZE      (1 << BLOCKLOG)
+#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

 /* Determine the amount of memory spanned by the initial heap table
    (not an absolute limit).  */
-#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)
+#define HEAP           (INT_BIT > 16 ? 4194304 : 65536)

 /* Number of contiguous free blocks allowed to build up at the end of
    memory before they will be returned to the system.  */
-#define FINAL_FREE_BLOCKS       8
+#define FINAL_FREE_BLOCKS 8

 /* Data structure giving per-block information.  */
@@ -152,29 +156,29 @@
     /* Heap information for a busy block.  */
     struct
-      {
-        /* Zero for a large (multiblock) object, or positive giving the
-           logarithm to the base two of the fragment size.  */
-        int type;
-        union
-          {
-            struct
-              {
-                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
-                __malloc_size_t first; /* First free fragment of the block.  */
-              } frag;
-            /* For a large object, in its first block, this has the number
-               of blocks in the object.  In the other blocks, this has a
-               negative number which says how far back the first block is.  */
-            __malloc_ptrdiff_t size;
-          } info;
-      } busy;
+    {
+      /* Zero for a large (multiblock) object, or positive giving the
+         logarithm to the base two of the fragment size.  */
+      int type;
+      union
+      {
+        struct
+        {
+          __malloc_size_t nfree; /* Free frags in a fragmented block.  */
+          __malloc_size_t first; /* First free fragment of the block.  */
+        } frag;
+        /* For a large object, in its first block, this has the number
+           of blocks in the object.  In the other blocks, this has a
+           negative number which says how far back the first block is.  */
+        __malloc_ptrdiff_t size;
+      } info;
+    } busy;
     /* Heap information for a free block
        (that may be the first of a free cluster).  */
     struct
-      {
-        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
-        __malloc_size_t next;   /* Index of next free cluster.  */
-        __malloc_size_t prev;   /* Index of previous free cluster.  */
-      } free;
+    {
+      __malloc_size_t size;        /* Size (in blocks) of a free cluster.  */
+      __malloc_size_t next;        /* Index of next free cluster.  */
+      __malloc_size_t prev;        /* Index of previous free cluster.  */
+    } free;
   } malloc_info;

@@ -186,6 +190,6 @@

 /* Address to block number and vice versa.  */
-#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
-#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
+#define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
+#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))

 /* Current search index for the heap table.  */
@@ -197,8 +201,8 @@
 /* Doubly linked lists of free fragments.  */
 struct list
-  {
-    struct list *next;
-    struct list *prev;
-  };
+{
+  struct list *next;
+  struct list *prev;
+};

 /* Free list headers for each fragment size.  */
@@ -207,9 +211,9 @@
 /* List of blocks allocated with `memalign' (or `valloc').  */
 struct alignlist
-  {
-    struct alignlist *next;
-    __ptr_t aligned;            /* The address that memaligned returned.  */
-    __ptr_t exact;              /* The address that malloc returned.  */
-  };
+{
+  struct alignlist *next;
+  __ptr_t aligned;        /* The address that memaligned returned.  */
+  __ptr_t exact;          /* The address that malloc returned.  */
+};
 extern struct alignlist *_aligned_blocks;

@@ -249,16 +253,16 @@
 extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
 extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
-                                        __malloc_size_t __alignment));
+                                        __malloc_size_t __alignment));

 /* Return values for `mprobe': these are the kinds of inconsistencies that
    `mcheck' enables detection of.  */
 enum mcheck_status
-  {
-    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
-    MCHECK_OK,                  /* Block is fine.  */
-    MCHECK_FREE,                /* Block freed twice.  */
-    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
-    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
-  };
+{
+  MCHECK_DISABLED = -1,  /* Consistency checking is not turned on.  */
+  MCHECK_OK,             /* Block is fine.  */
+  MCHECK_FREE,           /* Block freed twice.  */
+  MCHECK_HEAD,           /* Memory before the block was clobbered.  */
+  MCHECK_TAIL            /* Memory after the block was clobbered.  */
+};

 /* Activate a standard collection of debugging hooks.  This must be called
@@ -279,11 +283,11 @@
 /* Statistics available to the user.  */
 struct mstats
-  {
-    __malloc_size_t bytes_total; /* Total size of the heap. */
-    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
-    __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
-    __malloc_size_t chunks_free; /* Chunks in the free list. */
-    __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
-  };
+{
+  __malloc_size_t bytes_total; /* Total size of the heap. */
+  __malloc_size_t chunks_used; /* Chunks allocated by the user. */
+  __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks. */
+  __malloc_size_t chunks_free; /* Chunks in the free list. */
+    __malloc_size_t bytes_free;/* Byte total of chunks in the free list. */
+};

 /* Pick up the current statistics. */
@@ -292,5 +296,5 @@
 /* Call WARNFUN with a warning message when memory usage is high.  */
 extern void memory_warnings __P ((__ptr_t __start,
-                                  void (*__warnfun) __P ((const char *))));
+                                  void (*__warnfun) __P ((const char *))));


@@ -307,5 +311,5 @@


-#ifdef  __cplusplus
+#ifdef __cplusplus
 }
 #endif
@@ -378,10 +382,10 @@

 /* obachman: undef , gnulibc6 conflict with unistd.h */
-#define  __getpagesize()        getpagesize()
+#define __getpagesize() getpagesize()
 #endif /* if 0 */
 #endif

-#ifndef _MALLOC_INTERNAL
-#define _MALLOC_INTERNAL
+#ifndef _MALLOC_INTERNAL
+#define _MALLOC_INTERNAL
 #include <malloc.h>
 #endif
@@ -403,5 +407,5 @@
 /* Memory allocator `malloc'.
    Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
-                  Written May 1989 by Mike Haertel.
+                  Written May 1989 by Mike Haertel.

 This library is free software; you can redistribute it and/or
@@ -423,5 +427,5 @@
    or (US mail) as Mike Haertel c/o Free Software Foundation.  */

-#ifndef _MALLOC_INTERNAL
+#ifndef        _MALLOC_INTERNAL
 #define _MALLOC_INTERNAL
 #include <malloc.h>
@@ -475,11 +479,11 @@
   result = (*__morecore) (size);
   adj = (unsigned long int) ((unsigned long int) ((char *) result -
-                                                  (char *) NULL)) % BLOCKSIZE;
+                                                  (char *) NULL)) % BLOCKSIZE;
   if (adj != 0)
-    {
-      adj = BLOCKSIZE - adj;
-      (void) (*__morecore) (adj);
-      result = (char *) result + adj;
-    }
+  {
+    adj = BLOCKSIZE - adj;
+    (void) (*__morecore) (adj);
+    result = (char *) result + adj;
+  }

   if (__after_morecore_hook)
@@ -532,28 +536,28 @@
   /* Check if we need to grow the info table.  */
   if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
+  {
+    newsize = heapsize;
+    while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
+      newsize *= 2;
+    newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
+    if (newinfo == NULL)
     {
-      newsize = heapsize;
-      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
-        newsize *= 2;
-      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
-      if (newinfo == NULL)
-        {
-          (*__morecore) (-size);
-          return NULL;
-        }
-      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
-      memset (&newinfo[heapsize], 0,
-              (newsize - heapsize) * sizeof (malloc_info));
-      oldinfo = _heapinfo;
-      newinfo[BLOCK (oldinfo)].busy.type = 0;
-      newinfo[BLOCK (oldinfo)].busy.info.size
-        = BLOCKIFY (heapsize * sizeof (malloc_info));
-      _heapinfo = newinfo;
-      /* Account for the _heapinfo block itself in the statistics.  */
-      _bytes_used += newsize * sizeof (malloc_info);
-      ++_chunks_used;
-      _free_internal (oldinfo);
-      heapsize = newsize;
+      (*__morecore) (-size);
+      return NULL;
     }
+    memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
+    memset (&newinfo[heapsize], 0,
+            (newsize - heapsize) * sizeof (malloc_info));
+    oldinfo = _heapinfo;
+    newinfo[BLOCK (oldinfo)].busy.type = 0;
+    newinfo[BLOCK (oldinfo)].busy.info.size
+      = BLOCKIFY (heapsize * sizeof (malloc_info));
+    _heapinfo = newinfo;
+    /* Account for the _heapinfo block itself in the statistics.  */
+    _bytes_used += newsize * sizeof (malloc_info);
+    ++_chunks_used;
+    _free_internal (oldinfo);
+    heapsize = newsize;
+  }

   _heaplimit = BLOCK ((char *) result + size);
@@ -578,5 +582,5 @@
      Be compatible.  */

-#if     0
+#if        0
   if (size == 0)
     return NULL;
@@ -600,148 +604,148 @@
   /* Determine the allocation policy based on the request size.  */
   if (size <= BLOCKSIZE / 2)
+  {
+    /* Small allocation to receive a fragment of a block.
+       Determine the logarithm to base two of the fragment size. */
+    register __malloc_size_t log = 1;
+    --size;
+    while ((size /= 2) != 0)
+      ++log;
+
+    /* Look in the fragment lists for a
+       free fragment of the desired size. */
+    next = _fraghead[log].next;
+    if (next != NULL)
     {
-      /* Small allocation to receive a fragment of a block.
-         Determine the logarithm to base two of the fragment size. */
-      register __malloc_size_t log = 1;
-      --size;
-      while ((size /= 2) != 0)
-        ++log;
-
-      /* Look in the fragment lists for a
-         free fragment of the desired size. */
-      next = _fraghead[log].next;
-      if (next != NULL)
-        {
-          /* There are free fragments of this size.
-             Pop a fragment out of the fragment list and return it.
-             Update the block's nfree and first counters. */
-          result = (__ptr_t) next;
-          next->prev->next = next->next;
-          if (next->next != NULL)
-            next->next->prev = next->prev;
-          block = BLOCK (result);
-          if (--_heapinfo[block].busy.info.frag.nfree != 0)
-            _heapinfo[block].busy.info.frag.first = (unsigned long int)
-              ((unsigned long int) ((char *) next->next - (char *) NULL)
-               % BLOCKSIZE) >> log;
-
-          /* Update the statistics.  */
-          ++_chunks_used;
-          _bytes_used += 1 << log;
-          --_chunks_free;
-          _bytes_free -= 1 << log;
-        }
-      else
-        {
-          /* No free fragments of the desired size, so get a new block
-             and break it into fragments, returning the first.  */
-          result = malloc (BLOCKSIZE);
-          if (result == NULL)
-            return NULL;
-
-          /* Link all fragments but the first into the free list.  */
-          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
-            {
-              next = (struct list *) ((char *) result + (i << log));
-              next->next = _fraghead[log].next;
-              next->prev = &_fraghead[log];
-              next->prev->next = next;
-              if (next->next != NULL)
-                next->next->prev = next;
-            }
-
-          /* Initialize the nfree and first counters for this block.  */
-          block = BLOCK (result);
-          _heapinfo[block].busy.type = log;
-          _heapinfo[block].busy.info.frag.nfree = i - 1;
-          _heapinfo[block].busy.info.frag.first = i - 1;
-
-          _chunks_free += (BLOCKSIZE >> log) - 1;
-          _bytes_free += BLOCKSIZE - (1 << log);
-          _bytes_used -= BLOCKSIZE - (1 << log);
-        }
+      /* There are free fragments of this size.
+         Pop a fragment out of the fragment list and return it.
+         Update the block's nfree and first counters. */
+      result = (__ptr_t) next;
+      next->prev->next = next->next;
+      if (next->next != NULL)
+        next->next->prev = next->prev;
+      block = BLOCK (result);
+      if (--_heapinfo[block].busy.info.frag.nfree != 0)
+        _heapinfo[block].busy.info.frag.first = (unsigned long int)
+        ((unsigned long int) ((char *) next->next - (char *) NULL)
+          % BLOCKSIZE) >> log;
+
+      /* Update the statistics.  */
+      ++_chunks_used;
+      _bytes_used += 1 << log;
+      --_chunks_free;
+      _bytes_free -= 1 << log;
     }
+    else
+    {
+      /* No free fragments of the desired size, so get a new block
+         and break it into fragments, returning the first.  */
+      result = malloc (BLOCKSIZE);
+      if (result == NULL)
+        return NULL;
+
+      /* Link all fragments but the first into the free list.  */
+      for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
+      {
+        next = (struct list *) ((char *) result + (i << log));
+        next->next = _fraghead[log].next;
+        next->prev = &_fraghead[log];
+        next->prev->next = next;
+        if (next->next != NULL)
+          next->next->prev = next;
+      }
+
+      /* Initialize the nfree and first counters for this block.  */
+      block = BLOCK (result);
+      _heapinfo[block].busy.type = log;
+      _heapinfo[block].busy.info.frag.nfree = i - 1;
+      _heapinfo[block].busy.info.frag.first = i - 1;
+
+      _chunks_free += (BLOCKSIZE >> log) - 1;
+      _bytes_free += BLOCKSIZE - (1 << log);
+      _bytes_used -= BLOCKSIZE - (1 << log);
+    }
+  }
   else
+  {
+    /* Large allocation to receive one or more blocks.
+       Search the free list in a circle starting at the last place visited.
+       If we loop completely around without finding a large enough
+       space we will have to get more memory from the system.  */
+    blocks = BLOCKIFY (size);
+    start = block = _heapindex;
+    while (_heapinfo[block].free.size < blocks)
     {
-      /* Large allocation to receive one or more blocks.
-         Search the free list in a circle starting at the last place visited.
-         If we loop completely around without finding a large enough
-         space we will have to get more memory from the system.  */
-      blocks = BLOCKIFY (size);
-      start = block = _heapindex;
-      while (_heapinfo[block].free.size < blocks)
-        {
-          block = _heapinfo[block].free.next;
-          if (block == start)
-            {
-              /* Need to get more from the system.  Check to see if
-                 the new core will be contiguous with the final free
-                 block; if so we don't need to get as much.  */
-              block = _heapinfo[0].free.prev;
-              lastblocks = _heapinfo[block].free.size;
-              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
-                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
-                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
-                {
-                  /* Which block we are extending (the `final free
-                     block' referred to above) might have changed, if
-                     it got combined with a freed info table.  */
-                  block = _heapinfo[0].free.prev;
-                  _heapinfo[block].free.size += (blocks - lastblocks);
-                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
-                  continue;
-                }
-              result = morecore (blocks * BLOCKSIZE);
-              if (result == NULL)
-                return NULL;
-              block = BLOCK (result);
-              _heapinfo[block].busy.type = 0;
-              _heapinfo[block].busy.info.size = blocks;
-              ++_chunks_used;
-              _bytes_used += blocks * BLOCKSIZE;
-              return result;
-            }
-        }
-
-      /* At this point we have found a suitable free list entry.
-         Figure out how to remove what we need from the list. */
-      result = ADDRESS (block);
-      if (_heapinfo[block].free.size > blocks)
-        {
-          /* The block we found has a bit left over,
-             so relink the tail end back into the free list. */
-          _heapinfo[block + blocks].free.size
-            = _heapinfo[block].free.size - blocks;
-          _heapinfo[block + blocks].free.next
-            = _heapinfo[block].free.next;
-          _heapinfo[block + blocks].free.prev
-            = _heapinfo[block].free.prev;
-          _heapinfo[_heapinfo[block].free.prev].free.next
-            = _heapinfo[_heapinfo[block].free.next].free.prev
-            = _heapindex = block + blocks;
-        }
-      else
-        {
-          /* The block exactly matches our requirements,
-             so just remove it from the list. */
-          _heapinfo[_heapinfo[block].free.next].free.prev
-            = _heapinfo[block].free.prev;
-          _heapinfo[_heapinfo[block].free.prev].free.next
-            = _heapindex = _heapinfo[block].free.next;
-          --_chunks_free;
-        }
-
-      _heapinfo[block].busy.type = 0;
-      _heapinfo[block].busy.info.size = blocks;
-      ++_chunks_used;
-      _bytes_used += blocks * BLOCKSIZE;
-      _bytes_free -= blocks * BLOCKSIZE;
-
-      /* Mark all the blocks of the object just allocated except for the
-         first with a negative number so you can find the first block by
-         adding that adjustment.  */
-      while (--blocks > 0)
-        _heapinfo[block + blocks].busy.info.size = -blocks;
+      block = _heapinfo[block].free.next;
+      if (block == start)
+      {
+        /* Need to get more from the system.  Check to see if
+           the new core will be contiguous with the final free
+           block; if so we don't need to get as much.  */
+        block = _heapinfo[0].free.prev;
+        lastblocks = _heapinfo[block].free.size;
+        if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
+          (*__morecore) (0) == ADDRESS (block + lastblocks) &&
+          (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
+        {
+          /* Which block we are extending (the `final free
+             block' referred to above) might have changed, if
+             it got combined with a freed info table.  */
+          block = _heapinfo[0].free.prev;
+          _heapinfo[block].free.size += (blocks - lastblocks);
+          _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
+          continue;
+        }
+        result = morecore (blocks * BLOCKSIZE);
+        if (result == NULL)
+          return NULL;
+        block = BLOCK (result);
+        _heapinfo[block].busy.type = 0;
+        _heapinfo[block].busy.info.size = blocks;
+        ++_chunks_used;
+        _bytes_used += blocks * BLOCKSIZE;
+        return result;
+      }
     }
+
+    /* At this point we have found a suitable free list entry.
+       Figure out how to remove what we need from the list. */
+    result = ADDRESS (block);
+    if (_heapinfo[block].free.size > blocks)
+    {
+      /* The block we found has a bit left over,
+         so relink the tail end back into the free list. */
+      _heapinfo[block + blocks].free.size
+        = _heapinfo[block].free.size - blocks;
+      _heapinfo[block + blocks].free.next
+        = _heapinfo[block].free.next;
+      _heapinfo[block + blocks].free.prev
+        = _heapinfo[block].free.prev;
+      _heapinfo[_heapinfo[block].free.prev].free.next
+        = _heapinfo[_heapinfo[block].free.next].free.prev
+        = _heapindex = block + blocks;
+    }
+    else
+    {
+      /* The block exactly matches our requirements,
+         so just remove it from the list. */
+      _heapinfo[_heapinfo[block].free.next].free.prev
+        = _heapinfo[block].free.prev;
+      _heapinfo[_heapinfo[block].free.prev].free.next
+        = _heapindex = _heapinfo[block].free.next;
+      --_chunks_free;
+    }
+
+    _heapinfo[block].busy.type = 0;
+    _heapinfo[block].busy.info.size = blocks;
+    ++_chunks_used;
+    _bytes_used += blocks * BLOCKSIZE;
+    _bytes_free -= blocks * BLOCKSIZE;
+
+    /* Mark all the blocks of the object just allocated except for the
+       first with a negative number so you can find the first block by
+       adding that adjustment.  */
+    while (--blocks > 0)
+     _heapinfo[block + blocks].busy.info.size = -blocks;
+  }

   return result;
@@ -779,5 +783,5 @@
 /* Free a block of memory allocated by `malloc'.
    Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
-                  Written May 1989 by Mike Haertel.
+   Written May 1989 by Mike Haertel.

 This library is free software; you can redistribute it and/or
@@ -799,5 +803,5 @@
    or (US mail) as Mike Haertel c/o Free Software Foundation.  */

-#ifndef _MALLOC_INTERNAL
+#ifndef        _MALLOC_INTERNAL
 #define _MALLOC_INTERNAL
 #include <malloc.h>
@@ -825,5 +829,5 @@
   type = _heapinfo[block].busy.type;
   switch (type)
-    {
+  {
     case 0:
       /* Get as many statistics as early as we can.  */
@@ -833,65 +837,65 @@

       /* Find the free cluster previous to this one in the free list.
-         Start searching at the last block referenced; this may benefit
-         programs with locality of allocation.  */
+        Start searching at the last block referenced; this may benefit
+        programs with locality of allocation.  */
       i = _heapindex;
       if (i > block)
-        while (i > block)
-          i = _heapinfo[i].free.prev;
+        while (i > block)
+          i = _heapinfo[i].free.prev;
       else
-        {
-          do
-            i = _heapinfo[i].free.next;
-          while (i > 0 && i < block);
-          i = _heapinfo[i].free.prev;
-        }
+      {
+        do
+          i = _heapinfo[i].free.next;
+        while (i > 0 && i < block);
+        i = _heapinfo[i].free.prev;
+      }

       /* Determine how to link this block into the free list.  */
       if (block == i + _heapinfo[i].free.size)
-        {
-          /* Coalesce this block with its predecessor.  */
-          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
-          block = i;
-        }
+      {
+        /* Coalesce this block with its predecessor.  */
+        _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
+        block = i;
+      }
       else
-        {
-          /* Really link this block back into the free list.  */
-          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
-          _heapinfo[block].free.next = _heapinfo[i].free.next;
-          _heapinfo[block].free.prev = i;
-          _heapinfo[i].free.next = block;
-          _heapinfo[_heapinfo[block].free.next].free.prev = block;
-          ++_chunks_free;
-        }
+      {
+        /* Really link this block back into the free list.  */
+        _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
+        _heapinfo[block].free.next = _heapinfo[i].free.next;
+        _heapinfo[block].free.prev = i;
+        _heapinfo[i].free.next = block;
+        _heapinfo[_heapinfo[block].free.next].free.prev = block;
+        ++_chunks_free;
+      }

       /* Now that the block is linked in, see if we can coalesce it
-         with its successor (by deleting its successor from the list
-         and adding in its size).  */
+        with its successor (by deleting its successor from the list
+        and adding in its size).  */
       if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
-        {
-          _heapinfo[block].free.size
-            += _heapinfo[_heapinfo[block].free.next].free.size;
-          _heapinfo[block].free.next
-            = _heapinfo[_heapinfo[block].free.next].free.next;
-          _heapinfo[_heapinfo[block].free.next].free.prev = block;
-          --_chunks_free;
-        }
+      {
+        _heapinfo[block].free.size
+          += _heapinfo[_heapinfo[block].free.next].free.size;
+        _heapinfo[block].free.next
+          = _heapinfo[_heapinfo[block].free.next].free.next;
+        _heapinfo[_heapinfo[block].free.next].free.prev = block;
+        --_chunks_free;
+      }

       /* Now see if we can return stuff to the system.  */
       blocks = _heapinfo[block].free.size;
       if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
-          && (*__morecore) (0) == ADDRESS (block + blocks))
-        {
-          register __malloc_size_t bytes = blocks * BLOCKSIZE;
-          _heaplimit -= blocks;
-          (*__morecore) (-bytes);
-          _heapinfo[_heapinfo[block].free.prev].free.next
-            = _heapinfo[block].free.next;
-          _heapinfo[_heapinfo[block].free.next].free.prev
-            = _heapinfo[block].free.prev;
-          block = _heapinfo[block].free.prev;
-          --_chunks_free;
-          _bytes_free -= bytes;
-        }
+        && (*__morecore) (0) == ADDRESS (block + blocks))
+      {
+        register __malloc_size_t bytes = blocks * BLOCKSIZE;
+        _heaplimit -= blocks;
+        (*__morecore) (-bytes);
+        _heapinfo[_heapinfo[block].free.prev].free.next
+          = _heapinfo[block].free.next;
+        _heapinfo[_heapinfo[block].free.next].free.prev
+          = _heapinfo[block].free.prev;
+        block = _heapinfo[block].free.prev;
+        --_chunks_free;
+        _bytes_free -= bytes;
+      }

       /* Set the next search to begin at this block.  */
@@ -908,56 +912,56 @@
       /* Get the address of the first free fragment in this block.  */
       prev = (struct list *) ((char *) ADDRESS (block) +
-                           (_heapinfo[block].busy.info.frag.first << type));
+                           (_heapinfo[block].busy.info.frag.first << type));

       if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
-        {
-          /* If all fragments of this block are free, remove them
-             from the fragment list and free the whole block.  */
-          next = prev;
-          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
-            next = next->next;
-          prev->prev->next = next;
-          if (next != NULL)
-            next->prev = prev->prev;
-          _heapinfo[block].busy.type = 0;
-          _heapinfo[block].busy.info.size = 1;
-
-          /* Keep the statistics accurate.  */
-          ++_chunks_used;
-          _bytes_used += BLOCKSIZE;
-          _chunks_free -= BLOCKSIZE >> type;
-          _bytes_free -= BLOCKSIZE;
-
-          free (ADDRESS (block));
-        }
+      {
+        /* If all fragments of this block are free, remove them
+           from the fragment list and free the whole block.  */
+        next = prev;
+        for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
+          next = next->next;
+        prev->prev->next = next;
+        if (next != NULL)
+          next->prev = prev->prev;
+        _heapinfo[block].busy.type = 0;
+        _heapinfo[block].busy.info.size = 1;
+
+        /* Keep the statistics accurate.  */
+        ++_chunks_used;
+        _bytes_used += BLOCKSIZE;
+        _chunks_free -= BLOCKSIZE >> type;
+        _bytes_free -= BLOCKSIZE;
+
+        free (ADDRESS (block));
+      }
       else if (_heapinfo[block].busy.info.frag.nfree != 0)
-        {
-          /* If some fragments of this block are free, link this
-             fragment into the fragment list after the first free
-             fragment of this block. */
-          next = (struct list *) ptr;
-          next->next = prev->next;
-          next->prev = prev;
-          prev->next = next;
-          if (next->next != NULL)
-            next->next->prev = next;
-          ++_heapinfo[block].busy.info.frag.nfree;
-        }
+      {
+        /* If some fragments of this block are free, link this
+           fragment into the fragment list after the first free
+           fragment of this block. */
+        next = (struct list *) ptr;
+        next->next = prev->next;
+        next->prev = prev;
+        prev->next = next;
+        if (next->next != NULL)
+          next->next->prev = next;
+        ++_heapinfo[block].busy.info.frag.nfree;
+      }
       else
-        {
-          /* No fragments of this block are free, so link this
-             fragment into the fragment list and announce that
-             it is the first free fragment of this block. */
-          prev = (struct list *) ptr;
-          _heapinfo[block].busy.info.frag.nfree = 1;
-          _heapinfo[block].busy.info.frag.first = (unsigned long int)
-            ((unsigned long int) ((char *) ptr - (char *) NULL)
-             % BLOCKSIZE >> type);
-          prev->next = _fraghead[type].next;
-          prev->prev = &_fraghead[type];
-          prev->prev->next = prev;
-          if (prev->next != NULL)
-            prev->next->prev = prev;
-        }
+      {
+        /* No fragments of this block are free, so link this
+           fragment into the fragment list and announce that
+           it is the first free fragment of this block. */
+        prev = (struct list *) ptr;
+        _heapinfo[block].busy.info.frag.nfree = 1;
+        _heapinfo[block].busy.info.frag.first = (unsigned long int)
+          ((unsigned long int) ((char *) ptr - (char *) NULL)
+           % BLOCKSIZE >> type);
+        prev->next = _fraghead[type].next;
+        prev->prev = &_fraghead[type];
+        prev->prev->next = prev;
+        if (prev->next != NULL)
+          prev->next->prev = prev;
+      }
       break;
     }
@@ -976,9 +980,9 @@
   for (l = _aligned_blocks; l != NULL; l = l->next)
     if (l->aligned == ptr)
-      {
-        l->aligned = NULL;      /* Mark the slot in the list as free.  */
-        ptr = l->exact;
-        break;
-      }
+    {
+      l->aligned = NULL;        /* Mark the slot in the list as free.  */
+      ptr = l->exact;
+      break;
+    }

   if (__free_hook != NULL)
@@ -1005,5 +1009,5 @@
 Cambridge, MA 02139, USA.  */

-#ifndef _MALLOC_INTERNAL
+#ifndef        _MALLOC_INTERNAL
 #define _MALLOC_INTERNAL
 #include <malloc.h>
@@ -1015,8 +1019,8 @@
 #include <gnu-stabs.h>

-#undef  cfree
+#undef cfree

 function_alias(cfree, free, void, (ptr),
-               DEFUN(cfree, (ptr), PTR ptr))
+               DEFUN(cfree, (ptr), PTR ptr))

 #else
@@ -1032,5 +1036,5 @@
 /* Change the size of a block allocated by `malloc'.
    Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
-                     Written May 1989 by Mike Haertel.
+                     Written May 1989 by Mike Haertel.

 This library is free software; you can redistribute it and/or
@@ -1052,5 +1056,5 @@
    or (US mail) as Mike Haertel c/o Free Software Foundation.  */

-#ifndef _MALLOC_INTERNAL
+#ifndef        _MALLOC_INTERNAL
 #define _MALLOC_INTERNAL
 #include <malloc.h>
@@ -1082,41 +1086,41 @@
   /* Otherwise, we'll copy from the end.  */
   else
+  {
+    register char *endf = from + size;
+    register char *endt = to + size;
+
+    /* If TO - FROM is large, then we should break the copy into
+       nonoverlapping chunks of TO - FROM bytes each.  However, if
+       TO - FROM is small, then the bcopy function call overhead
+       makes this not worth it.  The crossover point could be about
+       anywhere.  Since I don't think the obvious copy loop is too
+       bad, I'm trying to err in its favor.  */
+    if (to - from < 64)
     {
-      register char *endf = from + size;
-      register char *endt = to + size;
-
-      /* If TO - FROM is large, then we should break the copy into
-         nonoverlapping chunks of TO - FROM bytes each.  However, if
-         TO - FROM is small, then the bcopy function call overhead
-         makes this not worth it.  The crossover point could be about
-         anywhere.  Since I don't think the obvious copy loop is too
-         bad, I'm trying to err in its favor.  */
-      if (to - from < 64)
-        {
-          do
-            *--endt = *--endf;
-          while (endf != from);
-        }
-      else
-        {
-          for (;;)
-            {
-              endt -= (to - from);
-              endf -= (to - from);
-
-              if (endt < to)
-                break;
-
-              bcopy (endf, endt, to - from);
-            }
-
-          /* If SIZE wasn't a multiple of TO - FROM, there will be a
-             little left over.  The amount left over is
-             (endt + (to - from)) - to, which is endt - from.  */
-          bcopy (from, to, endt - from);
-        }
+      do
+        *--endt = *--endf;
+      while (endf != from);
     }
-}     
-#endif  /* Not emacs.  */
+    else
+    {
+      for (;;)
+      {
+        endt -= (to - from);
+        endf -= (to - from);
+
+        if (endt < to)
+          break;
+
+        bcopy (endf, endt, to - from);
+      }
+
+      /* If SIZE wasn't a multiple of TO - FROM, there will be a
+         little left over.  The amount left over is
+         (endt + (to - from)) - to, which is endt - from.  */
+      bcopy (from, to, endt - from);
+    }
+  }
+}
+#endif        /* Not emacs.  */

 #define memmove(to, from, size) safe_bcopy ((from), (to), (size))
@@ -1146,8 +1150,8 @@

   if (size == 0)
-    {
-      free (ptr);
-      return malloc (0);
-    }
+  {
+    free (ptr);
+    return malloc (0);
+  }
   else if (ptr == NULL)
     return malloc (size);
@@ -1160,88 +1164,88 @@
   type = _heapinfo[block].busy.type;
   switch (type)
-    {
+  {
     case 0:
       /* Maybe reallocate a large block to a small fragment.  */
       if (size <= BLOCKSIZE / 2)
-        {
-          result = malloc (size);
-          if (result != NULL)
-            {
-              memcpy (result, ptr, size);
-              _free_internal (ptr);
-              return result;
-            }
-        }
+      {
+        result = malloc (size);
+        if (result != NULL)
+        {
+          memcpy (result, ptr, size);
+          _free_internal (ptr);
+          return result;
+        }
+      }

       /* The new size is a large allocation as well;
-         see if we can hold it in place. */
+        see if we can hold it in place. */
       blocks = BLOCKIFY (size);
       if (blocks < _heapinfo[block].busy.info.size)
-        {
-          /* The new size is smaller; return
-             excess memory to the free list. */
-          _heapinfo[block + blocks].busy.type = 0;
-          _heapinfo[block + blocks].busy.info.size
-            = _heapinfo[block].busy.info.size - blocks;
-          _heapinfo[block].busy.info.size = blocks;
-          /* We have just created a new chunk by splitting a chunk in two.
-             Now we will free this chunk; increment the statistics counter
-             so it doesn't become wrong when _free_internal decrements it.  */
-          ++_chunks_used;
-          _free_internal (ADDRESS (block + blocks));
-          result = ptr;
-        }
+      {
+        /* The new size is smaller; return
+           excess memory to the free list. */
+        _heapinfo[block + blocks].busy.type = 0;
+        _heapinfo[block + blocks].busy.info.size
+          = _heapinfo[block].busy.info.size - blocks;
+        _heapinfo[block].busy.info.size = blocks;
+        /* We have just created a new chunk by splitting a chunk in two.
+           Now we will free this chunk; increment the statistics counter
+           so it doesn't become wrong when _free_internal decrements it.  */
+        ++_chunks_used;
+        _free_internal (ADDRESS (block + blocks));
+        result = ptr;
+      }
       else if (blocks == _heapinfo[block].busy.info.size)
-        /* No size change necessary.  */
-        result = ptr;
+        /* No size change necessary.  */
+        result = ptr;
       else
-        {
-          /* Won't fit, so allocate a new region that will.
-             Free the old region first in case there is sufficient
-             adjacent free space to grow without moving. */
-          blocks = _heapinfo[block].busy.info.size;
-          /* Prevent free from actually returning memory to the system.  */
-          oldlimit = _heaplimit;
-          _heaplimit = 0;
-          _free_internal (ptr);
-          _heaplimit = oldlimit;
-          result = malloc (size);
-          if (result == NULL)
-            {
-              /* Now we're really in trouble.  We have to unfree
-                the thing we just freed.  Unfortunately it might
-                have been coalesced with its neighbors.  */
-              if (_heapindex == block)
-                (void) malloc (blocks * BLOCKSIZE);
-              else
-                {
-                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
-                  (void) malloc (blocks * BLOCKSIZE);
-                  _free_internal (previous);
-                }
-              return NULL;
-            }
-          if (ptr != result)
-            memmove (result, ptr, blocks * BLOCKSIZE);
-        }
+      {
+        /* Won't fit, so allocate a new region that will.
+           Free the old region first in case there is sufficient
+           adjacent free space to grow without moving. */
+        blocks = _heapinfo[block].busy.info.size;
+        /* Prevent free from actually returning memory to the system.  */
+        oldlimit = _heaplimit;
+        _heaplimit = 0;
+        _free_internal (ptr);
+        _heaplimit = oldlimit;
+        result = malloc (size);
+        if (result == NULL)
+        {
+          /* Now we're really in trouble.  We have to unfree
+            the thing we just freed.  Unfortunately it might
+            have been coalesced with its neighbors.  */
+          if (_heapindex == block)
+            (void) malloc (blocks * BLOCKSIZE);
+          else
+          {
+            __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
+            (void) malloc (blocks * BLOCKSIZE);
+            _free_internal (previous);
+          }
+          return NULL;
+        }
+        if (ptr != result)
+          memmove (result, ptr, blocks * BLOCKSIZE);
+      }
       break;

     default:
       /* Old size is a fragment; type is logarithm
-         to base two of the fragment size.  */
+        to base two of the fragment size.  */
       if (size > (__malloc_size_t) (1 << (type - 1)) &&
-          size <= (__malloc_size_t) (1 << type))
-        /* The new size is the same kind of fragment.  */
-        result = ptr;
+          size <= (__malloc_size_t) (1 << type))
+        /* The new size is the same kind of fragment.  */
+        result = ptr;
       else
-        {
-          /* The new size is different; allocate a new space,
-             and copy the lesser of the new size and the old. */
-          result = malloc (size);
-          if (result == NULL)
-            return NULL;
-          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
-          free (ptr);
-        }
+      {
+        /* The new size is different; allocate a new space,
+           and copy the lesser of the new size and the old. */
+        result = malloc (size);
+        if (result == NULL)
+          return NULL;
+        memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
+        free (ptr);
+      }
       break;
     }
@@ -1269,6 +1273,6 @@
    or (US mail) as Mike Haertel c/o Free Software Foundation.  */

-#ifndef _MALLOC_INTERNAL
-#define _MALLOC_INTERNAL
+#ifndef _MALLOC_INTERNAL
+#define _MALLOC_INTERNAL
 #include <malloc.h>
 #endif
@@ -1305,11 +1309,11 @@
 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

-#ifndef _MALLOC_INTERNAL
-#define _MALLOC_INTERNAL
+#ifndef _MALLOC_INTERNAL
+#define _MALLOC_INTERNAL
 #include <malloc.h>
 #endif

-#ifndef __GNU_LIBRARY__
-#define __sbrk  sbrk
+#ifndef __GNU_LIBRARY__
+#define __sbrk sbrk
 #endif

@@ -1359,5 +1363,5 @@
 Cambridge, MA 02139, USA.  */

-#ifndef _MALLOC_INTERNAL
+#ifndef _MALLOC_INTERNAL
 #define _MALLOC_INTERNAL
 #include <malloc.h>
@@ -1383,29 +1387,28 @@
     return NULL;
   adj = (unsigned long int) ((unsigned long int) ((char *) result -
-                                                  (char *) NULL)) % alignment;
+                                                  (char *) NULL)) % alignment;
   if (adj != 0)
+  {
+    struct alignlist *l;
+    for (l = _aligned_blocks; l != NULL; l = l->next)
+      if (l->aligned == NULL)
+        /* This slot is free.  Use it.  */
+        break;
+    if (l == NULL)
     {
-      struct alignlist *l;
-      for (l = _aligned_blocks; l != NULL; l = l->next)
-        if (l->aligned == NULL)
-          /* This slot is free.  Use it.  */
-          break;
+      l = (struct alignlist *) malloc (sizeof (struct alignlist));
       if (l == NULL)
-        {
-          l = (struct alignlist *) malloc (sizeof (struct alignlist));
-          if (l == NULL)
-            {
-              free (result);
-              return NULL;
-            }
-          l->next = _aligned_blocks;
-          _aligned_blocks = l;
-        }
-      l->exact = result;
-      result = l->aligned = (char *) result + alignment - adj;
+      {
+        free (result);
+        return NULL;
+      }
+      l->next = _aligned_blocks;
+      _aligned_blocks = l;
     }
+    l->exact = result;
+    result = l->aligned = (char *) result + alignment - adj;
+  }

   return result;
 }
-
 #endif /* HAVE_GMALLOC */
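
For reference, the block arithmetic that the new #ifdef __alpha branch feeds:
with BLOCKLOG 13 the allocator works in 8 kB blocks (presumably chosen to
match the Alpha's 8 kB page size; the changeset itself states no rationale).
The following is a self-contained sketch of the BLOCK/ADDRESS/BLOCKIFY macros
from the patched header, not code from the file; _heapbase here is a local
stand-in for the allocator's real state:

    #include <stdio.h>

    #define BLOCKLOG       13                /* Alpha branch of the patch */
    #define BLOCKSIZE      (1 << BLOCKLOG)   /* 8192 */
    #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

    static char *_heapbase;

    /* Same mapping as the patched macros: block numbers start at 1.  */
    #define BLOCK(A)   (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
    #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))

    int main (void)
    {
      static char heap[4 * BLOCKSIZE];
      _heapbase = heap;

      printf ("BLOCKSIZE        = %d\n", BLOCKSIZE);                /* 8192 */
      printf ("BLOCKIFY (20000) = %ld\n", (long) BLOCKIFY (20000)); /* 3    */
      printf ("BLOCK (base+8k)  = %ld\n",
              (long) BLOCK (heap + BLOCKSIZE));                     /* 2    */
      printf ("ADDRESS (2) ok   = %d\n",
              ADDRESS (2) == (void *) (heap + BLOCKSIZE));          /* 1    */
      return 0;
    }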