source: git/Singular/gmalloc.c @ 52a5e8

spielwiese
Last change on this file since 52a5e8 was 52a5e8, checked in by Olaf Bachmann <obachman@…>, 25 years ago
* new memory management scheme git-svn-id: file:///usr/local/Singular/svn/trunk@2703 2c84dea3-7e68-4137-9b89-c4e89433aadc
  • Property mode set to 100644
File size: 40.7 KB
Line 
1/****************************************
2*  Computer Algebra System SINGULAR     *
3****************************************/
4/* $Id: */
5
6/* gmalloc used by Singular to have a trusted malloc and valloc
7   slightly edited to include mod2.h and to only provide its functionality
8   if HAVE_GMALLOC is defined
9*/   
10
11#ifdef  HAVE_CONFIG_H
12#include "mod2.h"
13#endif
14
15#ifdef HAVE_GMALLOC
16
17
18#define _MALLOC_INTERNAL
19
20/* The malloc headers and source files from the C library follow here.  */
21
22/* Declarations for `malloc' and friends.
23   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
24                  Written May 1989 by Mike Haertel.
25
26This library is free software; you can redistribute it and/or
27modify it under the terms of the GNU Library General Public License as
28published by the Free Software Foundation; either version 2 of the
29License, or (at your option) any later version.
30
31This library is distributed in the hope that it will be useful,
32but WITHOUT ANY WARRANTY; without even the implied warranty of
33MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
34Library General Public License for more details.
35
36You should have received a copy of the GNU Library General Public
37License along with this library; see the file COPYING.LIB.  If
38not, write to the Free Software Foundation, Inc., 675 Mass Ave,
39Cambridge, MA 02139, USA.
40
41   The author may be reached (Email) at the address mike@ai.mit.edu,
42   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
43
44#ifndef _MALLOC_H
45
46#define _MALLOC_H       1
47
48#ifdef _MALLOC_INTERNAL
49
50#if     defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
51#include <string.h>
52#else
53#ifndef memset
54#define memset(s, zero, n)      bzero ((s), (n))
55#endif
56#ifndef memcpy
57#define memcpy(d, s, n)         bcopy ((s), (d), (n))
58#endif
59#endif
60
61#if     defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
62#include <limits.h>
63#else
64#ifndef CHAR_BIT
65#define CHAR_BIT        8
66#endif
67#endif
68
69#ifdef  HAVE_UNISTD_H
70#include <unistd.h>
71#endif
72
73#endif  /* _MALLOC_INTERNAL.  */
74
75
76#ifdef  __cplusplus
77extern "C"
78{
79#endif
80
81#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
82#undef  __P
83#define __P(args)       args
84#undef  __ptr_t
85#define __ptr_t         void *
86#else /* Not C++ or ANSI C.  */
87#undef  __P
88#define __P(args)       ()
89#undef  const
90#define const
91#undef  __ptr_t
92#define __ptr_t         char *
93#endif /* C++ or ANSI C.  */
94
95#if defined (__STDC__) && __STDC__
96#include <stddef.h>
97#define __malloc_size_t         size_t
98#define __malloc_ptrdiff_t      ptrdiff_t
99#else
100#define __malloc_size_t         unsigned int
101#define __malloc_ptrdiff_t      int
102#endif
103
104#ifndef NULL
105#define NULL    0
106#endif
107
108
109/* Allocate SIZE bytes of memory.  */
110extern __ptr_t malloc __P ((__malloc_size_t __size));
111/* Re-allocate the previously allocated block
112   in __ptr_t, making the new block SIZE bytes long.  */
113extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
114/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
115extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
116/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
117extern void free __P ((__ptr_t __ptr));
118
119/* Allocate SIZE bytes allocated to ALIGNMENT bytes.  */
120extern __ptr_t memalign __P ((__malloc_size_t __alignment,
121                              __malloc_size_t __size));
122
123/* Allocate SIZE bytes on a page boundary.  */
124extern __ptr_t valloc __P ((__malloc_size_t __size));
125
126
127#ifdef _MALLOC_INTERNAL
128
129/* The allocator divides the heap into blocks of fixed size; large
130   requests receive one or more whole blocks, and small requests
131   receive a fragment of a block.  Fragment sizes are powers of two,
132   and all fragments of a block are the same size.  When all the
133   fragments in a block have been freed, the block itself is freed.  */
134#define INT_BIT         (CHAR_BIT * sizeof(int))
135#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
136#define BLOCKSIZE       (1 << BLOCKLOG)
137#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
138
139/* Determine the amount of memory spanned by the initial heap table
140   (not an absolute limit).  */
141#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)
142
143/* Number of contiguous free blocks allowed to build up at the end of
144   memory before they will be returned to the system.  */
145#define FINAL_FREE_BLOCKS       8
146
/* Data structure giving per-block information.  There is one entry
   per BLOCKSIZE-sized heap block, kept in the `_heapinfo' table.
   Which arm of the union is valid depends on whether the block is
   currently allocated (`busy') or on the free list (`free').  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.
                                          (Fragment index within the block,
                                          i.e. byte offset >> type.)  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;
178
179/* Pointer to first block of the heap.  */
180extern char *_heapbase;
181
182/* Table indexed by block number giving per-block information.  */
183extern malloc_info *_heapinfo;
184
185/* Address to block number and vice versa.  */
186#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
187#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
188
189/* Current search index for the heap table.  */
190extern __malloc_size_t _heapindex;
191
192/* Limit of valid info table indices.  */
193extern __malloc_size_t _heaplimit;
194
/* Doubly linked lists of free fragments.  A node is stored inside the
   free fragment itself, which is why `malloc' rounds every request up
   to at least sizeof (struct list).  */
struct list
  {
    struct list *next;          /* Next free fragment of this size.  */
    struct list *prev;          /* Previous fragment, or the list head.  */
  };
201
202/* Free list headers for each fragment size.  */
203extern struct list _fraghead[];
204
/* List of blocks allocated with `memalign' (or `valloc').  Records the
   mapping from the aligned address handed to the user back to the raw
   address obtained from `malloc', so `free' can release the latter.  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that memaligned returned.  */
    __ptr_t exact;              /* The address that malloc returned.  */
  };
212extern struct alignlist *_aligned_blocks;
213
214/* Instrumentation.  */
215extern __malloc_size_t _chunks_used;
216extern __malloc_size_t _bytes_used;
217extern __malloc_size_t _chunks_free;
218extern __malloc_size_t _bytes_free;
219
220/* Internal version of `free' used in `morecore' (malloc.c). */
221extern void _free_internal __P ((__ptr_t __ptr));
222
223#endif /* _MALLOC_INTERNAL.  */
224
225/* Given an address in the middle of a malloc'd object,
226   return the address of the beginning of the object.  */
227extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
228
229/* Underlying allocation function; successive calls should
230   return contiguous pieces of memory.  */
231extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
232
233/* Default value of `__morecore'.  */
234extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
235
236/* If not NULL, this function is called after each time
237   `__morecore' is called to increase the data size.  */
238extern void (*__after_morecore_hook) __P ((void));
239
240/* Nonzero if `malloc' has been called and done its initialization.  */
241extern int __malloc_initialized;
242
243/* Hooks for debugging versions.  */
244extern void (*__malloc_initialize_hook) __P ((void));
245extern void (*__free_hook) __P ((__ptr_t __ptr));
246extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
247extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
248extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
249                                        __malloc_size_t __alignment));
250
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  MCHECK_DISABLED is negative so the
   "everything fine" value MCHECK_OK is zero.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };
261
262/* Activate a standard collection of debugging hooks.  This must be called
263   before `malloc' is ever called.  ABORTFUNC is called with an error code
264   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
265   null, the standard function prints on stderr and then calls `abort'.  */
266extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
267
268/* Check for aberrations in a particular malloc'd block.  You must have
269   called `mcheck' already.  These are the same checks that `mcheck' does
270   when you free or reallocate a block.  */
271extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
272
273/* Activate a standard collection of tracing hooks.  */
274extern void mtrace __P ((void));
275extern void muntrace __P ((void));
276
/* Statistics available to the user.  A snapshot of the internal
   _chunks_used/_bytes_used/_chunks_free/_bytes_free counters,
   returned by the `mstats' function.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap. */
    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
    __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
    __malloc_size_t chunks_free; /* Chunks in the free list. */
    __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
  };
286
287/* Pick up the current statistics. */
288extern struct mstats mstats __P ((void));
289
290/* Call WARNFUN with a warning message when memory usage is high.  */
291extern void memory_warnings __P ((__ptr_t __start,
292                                  void (*__warnfun) __P ((const char *))));
293
294
295/* Relocating allocator.  */
296
297/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
298extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
299
300/* Free the storage allocated in HANDLEPTR.  */
301extern void r_alloc_free __P ((__ptr_t *__handleptr));
302
303/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
304extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
305
306
307#ifdef  __cplusplus
308}
309#endif
310
311#endif /* malloc.h  */
312/* Allocate memory on a page boundary.
313   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
314
315This library is free software; you can redistribute it and/or
316modify it under the terms of the GNU Library General Public License as
317published by the Free Software Foundation; either version 2 of the
318License, or (at your option) any later version.
319
320This library is distributed in the hope that it will be useful,
321but WITHOUT ANY WARRANTY; without even the implied warranty of
322MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
323Library General Public License for more details.
324
325You should have received a copy of the GNU Library General Public
326License along with this library; see the file COPYING.LIB.  If
327not, write to the Free Software Foundation, Inc., 675 Mass Ave,
328Cambridge, MA 02139, USA.
329
330   The author may be reached (Email) at the address mike@ai.mit.edu,
331   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
332
333#if defined (__GNU_LIBRARY__) || defined (_LIBC)
334#include <stddef.h>
335#include <sys/cdefs.h>
336extern size_t __getpagesize __P ((void));
337#else
#if 0 /* obachman: pasted in getpagesize.h manually */
#include "getpagesize.h"
#else

#ifdef VMS
#define getpagesize() 512
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef _SC_PAGESIZE
#define getpagesize() sysconf(_SC_PAGESIZE)
#else

#include <sys/param.h>

#ifdef EXEC_PAGESIZE
#define getpagesize() EXEC_PAGESIZE
#else
#ifdef NBPG
#ifndef CLSIZE
#define CLSIZE 1
#endif /* no CLSIZE */
/* Parenthesized: the bare `NBPG * CLSIZE' expansion mis-associates in
   expressions such as `x % getpagesize()'.  */
#define getpagesize() (NBPG * CLSIZE)
#else /* no NBPG */
#ifdef NBPC
#define getpagesize() NBPC
#else /* no NBPC */
#ifdef PAGESIZE
#define getpagesize() PAGESIZE
#endif
#endif /* NBPC */
#endif /* no NBPG */
#endif /* no EXEC_PAGESIZE */
#endif /* no _SC_PAGESIZE */

#define  __getpagesize()        getpagesize()
#endif /* if 0 */
378#endif
379
380#ifndef _MALLOC_INTERNAL
381#define _MALLOC_INTERNAL
382#include <malloc.h>
383#endif
384
385static __malloc_size_t pagesize;
386
387__ptr_t
388valloc (size)
389     __malloc_size_t size;
390{
391  if (pagesize == 0)
392    pagesize = __getpagesize ();
393
394  return memalign (pagesize, size);
395}
396/* Memory allocator `malloc'.
397   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
398                  Written May 1989 by Mike Haertel.
399
400This library is free software; you can redistribute it and/or
401modify it under the terms of the GNU Library General Public License as
402published by the Free Software Foundation; either version 2 of the
403License, or (at your option) any later version.
404
405This library is distributed in the hope that it will be useful,
406but WITHOUT ANY WARRANTY; without even the implied warranty of
407MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
408Library General Public License for more details.
409
410You should have received a copy of the GNU Library General Public
411License along with this library; see the file COPYING.LIB.  If
412not, write to the Free Software Foundation, Inc., 675 Mass Ave,
413Cambridge, MA 02139, USA.
414
415   The author may be reached (Email) at the address mike@ai.mit.edu,
416   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
417
418#ifndef _MALLOC_INTERNAL
419#define _MALLOC_INTERNAL
420#include <malloc.h>
421#endif
422
/* How to really get more memory: the underlying sbrk-style allocation
   function.  Successive calls must return contiguous memory.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  When non-NULL it replaces `malloc'
   entirely (see the top of `malloc' below).  */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries the table currently holds.  */
static __malloc_size_t heapsize;

/* Search index in the info table; `malloc' starts its free-list scan
   here and `free' resets it to the block just freed.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size (indexed by log2 of the size).  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation counters, kept up to date by malloc/free below.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Nonzero once `initialize' has run.  */
int __malloc_initialized;

/* Called at the start of `initialize', before any heap setup.  */
void (*__malloc_initialize_hook) __P ((void));
/* Called from `align' after core has been requested via `__morecore'.  */
void (*__after_morecore_hook) __P ((void));
458
459/* Aligned allocation.  */
460static __ptr_t align __P ((__malloc_size_t));
461static __ptr_t
462align (size)
463     __malloc_size_t size;
464{
465  __ptr_t result;
466  unsigned long int adj;
467
468  result = (*__morecore) (size);
469  adj = (unsigned long int) ((unsigned long int) ((char *) result -
470                                                  (char *) NULL)) % BLOCKSIZE;
471  if (adj != 0)
472    {
473      adj = BLOCKSIZE - adj;
474      (void) (*__morecore) (adj);
475      result = (char *) result + adj;
476    }
477
478  if (__after_morecore_hook)
479    (*__after_morecore_hook) ();
480
481  return result;
482}
483
484/* Set everything up and remember that we have.  */
485static int initialize __P ((void));
486static int
487initialize ()
488{
489  if (__malloc_initialize_hook)
490    (*__malloc_initialize_hook) ();
491
492  heapsize = HEAP / BLOCKSIZE;
493  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
494  if (_heapinfo == NULL)
495    return 0;
496  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
497  _heapinfo[0].free.size = 0;
498  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
499  _heapindex = 0;
500  _heapbase = (char *) _heapinfo;
501
502  /* Account for the _heapinfo block itself in the statistics.  */
503  _bytes_used = heapsize * sizeof (malloc_info);
504  _chunks_used = 1;
505
506  __malloc_initialized = 1;
507  return 1;
508}
509
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.
   Returns BLOCKSIZE-aligned core for SIZE bytes, or NULL on failure.
   Also raises `_heaplimit' to cover the new core.  */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Double the table size until it can index the new top of core.  */
      newsize = heapsize;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
        newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          /* Could not grow the table: give back the core we just took
             (negative argument shrinks) and fail the request.  */
          (*__morecore) (-size);
          return NULL;
        }
      /* Copy the live entries, zero the new tail, and mark the OLD
         table's blocks as one busy large object so it can be freed.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (heapsize * sizeof (malloc_info));
      _heapinfo = newinfo;
      /* Account for the _heapinfo block itself in the statistics.  */
      _bytes_used += newsize * sizeof (malloc_info);
      ++_chunks_used;
      /* NOTE: _heapinfo must already point at the new table here,
         because _free_internal consults it.  */
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
555
/* Allocate memory from the heap.
   Requests of at most BLOCKSIZE/2 bytes are served as a power-of-two
   fragment of a single block; larger requests get a run of whole
   blocks taken from the circular free list (growing core via
   `morecore' when the list has no fit).  Returns NULL on failure.  */
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if     0
  if (size == 0)
    return NULL;
#endif

  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

  /* Every chunk must be able to hold a free-list node in place.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size,
         i.e. the smallest log with (1 << log) >= size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          /* If fragments remain, `first' becomes the fragment index
             (offset-within-block >> log) of the next free one.  */
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.
             (Recursive call takes the large-allocation path.)  */
          result = malloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list.  */
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              if (next->next != NULL)
                next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much.  */
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  /* Which block we are extending (the `final free
                     block' referred to above) might have changed, if
                     it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (blocks - lastblocks);
                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
                  /* Re-scan: the enlarged cluster will now satisfy us.  */
                  continue;
                }
              result = morecore (blocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              /* Fresh core is not on the free list; mark it busy and
                 return it directly.  */
              block = BLOCK (result);
              _heapinfo[block].busy.type = 0;
              _heapinfo[block].busy.info.size = blocks;
              ++_chunks_used;
              _bytes_used += blocks * BLOCKSIZE;
              return result;
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}
742
743#ifndef _LIBC
744
745/* On some ANSI C systems, some libc functions call _malloc, _free
746   and _realloc.  Make them use the GNU functions.  */
747
/* Compatibility entry point: some libcs call `_malloc'; forward it
   to the GNU `malloc' above.  */
__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}
754
/* Compatibility entry point: forward `_free' to the GNU `free'.  */
void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
761
/* Compatibility entry point: forward `_realloc' to the GNU `realloc'.  */
__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}
769
770#endif
771/* Free a block of memory allocated by `malloc'.
772   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
773                  Written May 1989 by Mike Haertel.
774
775This library is free software; you can redistribute it and/or
776modify it under the terms of the GNU Library General Public License as
777published by the Free Software Foundation; either version 2 of the
778License, or (at your option) any later version.
779
780This library is distributed in the hope that it will be useful,
781but WITHOUT ANY WARRANTY; without even the implied warranty of
782MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
783Library General Public License for more details.
784
785You should have received a copy of the GNU Library General Public
786License along with this library; see the file COPYING.LIB.  If
787not, write to the Free Software Foundation, Inc., 675 Mass Ave,
788Cambridge, MA 02139, USA.
789
790   The author may be reached (Email) at the address mike@ai.mit.edu,
791   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
792
793#ifndef _MALLOC_INTERNAL
794#define _MALLOC_INTERNAL
795#include <malloc.h>
796#endif
797
798/* Debugging hook for free.  */
799void (*__free_hook) __P ((__ptr_t __ptr));
800
801/* List of blocks allocated by memalign.  */
802struct alignlist *_aligned_blocks = NULL;
803
804/* Return memory to the heap.
805   Like `free' but don't call a __free_hook if there is one.  */
806void
807_free_internal (ptr)
808     __ptr_t ptr;
809{
810  int type;
811  __malloc_size_t block, blocks;
812  register __malloc_size_t i;
813  struct list *prev, *next;
814
815  block = BLOCK (ptr);
816
817  type = _heapinfo[block].busy.type;
818  switch (type)
819    {
820    case 0:
821      /* Get as many statistics as early as we can.  */
822      --_chunks_used;
823      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
824      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
825
826      /* Find the free cluster previous to this one in the free list.
827         Start searching at the last block referenced; this may benefit
828         programs with locality of allocation.  */
829      i = _heapindex;
830      if (i > block)
831        while (i > block)
832          i = _heapinfo[i].free.prev;
833      else
834        {
835          do
836            i = _heapinfo[i].free.next;
837          while (i > 0 && i < block);
838          i = _heapinfo[i].free.prev;
839        }
840
841      /* Determine how to link this block into the free list.  */
842      if (block == i + _heapinfo[i].free.size)
843        {
844          /* Coalesce this block with its predecessor.  */
845          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
846          block = i;
847        }
848      else
849        {
850          /* Really link this block back into the free list.  */
851          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
852          _heapinfo[block].free.next = _heapinfo[i].free.next;
853          _heapinfo[block].free.prev = i;
854          _heapinfo[i].free.next = block;
855          _heapinfo[_heapinfo[block].free.next].free.prev = block;
856          ++_chunks_free;
857        }
858
859      /* Now that the block is linked in, see if we can coalesce it
860         with its successor (by deleting its successor from the list
861         and adding in its size).  */
862      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
863        {
864          _heapinfo[block].free.size
865            += _heapinfo[_heapinfo[block].free.next].free.size;
866          _heapinfo[block].free.next
867            = _heapinfo[_heapinfo[block].free.next].free.next;
868          _heapinfo[_heapinfo[block].free.next].free.prev = block;
869          --_chunks_free;
870        }
871
872      /* Now see if we can return stuff to the system.  */
873      blocks = _heapinfo[block].free.size;
874      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
875          && (*__morecore) (0) == ADDRESS (block + blocks))
876        {
877          register __malloc_size_t bytes = blocks * BLOCKSIZE;
878          _heaplimit -= blocks;
879          (*__morecore) (-bytes);
880          _heapinfo[_heapinfo[block].free.prev].free.next
881            = _heapinfo[block].free.next;
882          _heapinfo[_heapinfo[block].free.next].free.prev
883            = _heapinfo[block].free.prev;
884          block = _heapinfo[block].free.prev;
885          --_chunks_free;
886          _bytes_free -= bytes;
887        }
888
889      /* Set the next search to begin at this block.  */
890      _heapindex = block;
891      break;
892
893    default:
894      /* Do some of the statistics.  */
895      --_chunks_used;
896      _bytes_used -= 1 << type;
897      ++_chunks_free;
898      _bytes_free += 1 << type;
899
900      /* Get the address of the first free fragment in this block.  */
901      prev = (struct list *) ((char *) ADDRESS (block) +
902                           (_heapinfo[block].busy.info.frag.first << type));
903
904      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
905        {
906          /* If all fragments of this block are free, remove them
907             from the fragment list and free the whole block.  */
908          next = prev;
909          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
910            next = next->next;
911          prev->prev->next = next;
912          if (next != NULL)
913            next->prev = prev->prev;
914          _heapinfo[block].busy.type = 0;
915          _heapinfo[block].busy.info.size = 1;
916
917          /* Keep the statistics accurate.  */
918          ++_chunks_used;
919          _bytes_used += BLOCKSIZE;
920          _chunks_free -= BLOCKSIZE >> type;
921          _bytes_free -= BLOCKSIZE;
922
923          free (ADDRESS (block));
924        }
925      else if (_heapinfo[block].busy.info.frag.nfree != 0)
926        {
927          /* If some fragments of this block are free, link this
928             fragment into the fragment list after the first free
929             fragment of this block. */
930          next = (struct list *) ptr;
931          next->next = prev->next;
932          next->prev = prev;
933          prev->next = next;
934          if (next->next != NULL)
935            next->next->prev = next;
936          ++_heapinfo[block].busy.info.frag.nfree;
937        }
938      else
939        {
940          /* No fragments of this block are free, so link this
941             fragment into the fragment list and announce that
942             it is the first free fragment of this block. */
943          prev = (struct list *) ptr;
944          _heapinfo[block].busy.info.frag.nfree = 1;
945          _heapinfo[block].busy.info.frag.first = (unsigned long int)
946            ((unsigned long int) ((char *) ptr - (char *) NULL)
947             % BLOCKSIZE >> type);
948          prev->next = _fraghead[type].next;
949          prev->prev = &_fraghead[type];
950          prev->prev->next = prev;
951          if (prev->next != NULL)
952            prev->next->prev = prev;
953        }
954      break;
955    }
956}
957
958/* Return memory to the heap.  */
959void
960free (ptr)
961     __ptr_t ptr;
962{
963  register struct alignlist *l;
964
965  if (ptr == NULL)
966    return;
967
968  for (l = _aligned_blocks; l != NULL; l = l->next)
969    if (l->aligned == ptr)
970      {
971        l->aligned = NULL;      /* Mark the slot in the list as free.  */
972        ptr = l->exact;
973        break;
974      }
975
976  if (__free_hook != NULL)
977    (*__free_hook) (ptr);
978  else
979    _free_internal (ptr);
980}
981/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
982This file is part of the GNU C Library.
983
984The GNU C Library is free software; you can redistribute it and/or
985modify it under the terms of the GNU Library General Public License as
986published by the Free Software Foundation; either version 2 of the
987License, or (at your option) any later version.
988
989The GNU C Library is distributed in the hope that it will be useful,
990but WITHOUT ANY WARRANTY; without even the implied warranty of
991MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
992Library General Public License for more details.
993
994You should have received a copy of the GNU Library General Public
995License along with the GNU C Library; see the file COPYING.LIB.  If
996not, write to the Free Software Foundation, Inc., 675 Mass Ave,
997Cambridge, MA 02139, USA.  */
998
999#ifndef _MALLOC_INTERNAL
1000#define _MALLOC_INTERNAL
1001#include <malloc.h>
1002#endif
1003
1004#ifdef _LIBC
1005
1006#include <ansidecl.h>
1007#include <gnu-stabs.h>
1008
1009#undef  cfree
1010
1011function_alias(cfree, free, void, (ptr),
1012               DEFUN(cfree, (ptr), PTR ptr))
1013
1014#else
1015
void
cfree (ptr)
     __ptr_t ptr;
{
  /* `cfree' is a historical (System V) alias for `free'; just forward.  */
  free (ptr);
}
1022
1023#endif
1024/* Change the size of a block allocated by `malloc'.
1025   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1026                     Written May 1989 by Mike Haertel.
1027
1028This library is free software; you can redistribute it and/or
1029modify it under the terms of the GNU Library General Public License as
1030published by the Free Software Foundation; either version 2 of the
1031License, or (at your option) any later version.
1032
1033This library is distributed in the hope that it will be useful,
1034but WITHOUT ANY WARRANTY; without even the implied warranty of
1035MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1036Library General Public License for more details.
1037
1038You should have received a copy of the GNU Library General Public
1039License along with this library; see the file COPYING.LIB.  If
1040not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1041Cambridge, MA 02139, USA.
1042
1043   The author may be reached (Email) at the address mike@ai.mit.edu,
1044   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1045
1046#ifndef _MALLOC_INTERNAL
1047#define _MALLOC_INTERNAL
1048#include <malloc.h>
1049#endif
1050
1051#if  (defined (MEMMOVE_MISSING) || \
1052      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1053
1054/* Snarfed directly from Emacs src/dispnew.c:
1055   XXX Should use system bcopy if it handles overlap.  */
1056#ifndef emacs
1057
/* Like bcopy, but gives correct results even when the SIZE-byte
   source and destination regions overlap.  SIZE <= 0 or FROM == TO
   is a no-op.  */

static void
safe_bcopy (from, to, size)
     char *from, *to;
     int size;
{
  if (size <= 0 || from == to)
    return;

  /* Non-overlapping regions, or a destination below the source, are
     assumed safe for a plain forward bcopy.  */
  if (to < from || from + size <= to)
    {
      bcopy (from, to, size);
      return;
    }

  /* The destination overlaps the tail of the source: copy from the
     end backwards.  */
  {
    register char *src_end = from + size;
    register char *dst_end = to + size;
    int gap = to - from;

    if (gap < 64)
      {
        /* For a small displacement the bcopy call overhead would
           dominate, so use a simple backward byte loop.  */
        do
          *--dst_end = *--src_end;
        while (src_end != from);
      }
    else
      {
        /* For a large displacement, move non-overlapping GAP-sized
           chunks back-to-front with bcopy...  */
        for (;;)
          {
            dst_end -= gap;
            src_end -= gap;

            if (dst_end < to)
              break;

            bcopy (src_end, dst_end, gap);
          }

        /* ...then the leftover head: when SIZE is not a multiple of
           GAP, (dst_end + gap) - to == dst_end - from bytes remain.  */
        bcopy (from, to, dst_end - from);
      }
  }
}
1112#endif  /* Not emacs.  */
1113
1114#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1115
1116#endif
1117
1118
1119#define min(A, B) ((A) < (B) ? (A) : (B))
1120
1121/* Debugging hook for realloc.  */
1122__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1123
1124/* Resize the given region to the new size, returning a pointer
1125   to the (possibly moved) region.  This is optimized for speed;
1126   some benchmarks seem to indicate that greater compactness is
1127   achieved by unconditionally allocating and copying to a
1128   new region.  This module has incestuous knowledge of the
1129   internals of both free and malloc. */
/* PTR must come from malloc/realloc (or be NULL); SIZE is the new
   byte count.  Returns the possibly-moved region, or NULL on failure
   (in which case the original region is preserved).  */
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  /* realloc (ptr, 0) frees PTR and degenerates to malloc (0);
     realloc (NULL, size) is plain malloc (size).  */
  if (size == 0)
    {
      free (ptr);
      return malloc (0);
    }
  else if (ptr == NULL)
    return malloc (size);

  if (__realloc_hook != NULL)
    return (*__realloc_hook) (ptr, size);

  /* Index of PTR's block in the _heapinfo bookkeeping table.  */
  block = BLOCK (ptr);

  /* busy.type is 0 for a whole-block (large) region, otherwise the
     base-2 logarithm of the fragment size PTR lives in.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = malloc (size);
          if (result != NULL)
            {
              /* SIZE <= the old allocation here, so copying SIZE
                 bytes from the old region is safe.  */
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving. */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal (ptr);
          _heaplimit = oldlimit;
          result = malloc (size);
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) malloc (blocks * BLOCKSIZE);
              else
                {
                  /* Re-allocate the gap up to BLOCK first so the
                     second malloc lands exactly on the old region,
                     then release the placeholder.  */
                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
                  (void) malloc (blocks * BLOCKSIZE);
                  _free_internal (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = malloc (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          free (ptr);
        }
      break;
    }

  return result;
}
1243/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1244
1245This library is free software; you can redistribute it and/or
1246modify it under the terms of the GNU Library General Public License as
1247published by the Free Software Foundation; either version 2 of the
1248License, or (at your option) any later version.
1249
1250This library is distributed in the hope that it will be useful,
1251but WITHOUT ANY WARRANTY; without even the implied warranty of
1252MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1253Library General Public License for more details.
1254
1255You should have received a copy of the GNU Library General Public
1256License along with this library; see the file COPYING.LIB.  If
1257not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1258Cambridge, MA 02139, USA.
1259
1260   The author may be reached (Email) at the address mike@ai.mit.edu,
1261   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1262
1263#ifndef _MALLOC_INTERNAL
1264#define _MALLOC_INTERNAL
1265#include <malloc.h>
1266#endif
1267
1268/* Allocate an array of NMEMB elements each SIZE bytes long.
1269   The entire array is initialized to zeros.  */
1270__ptr_t
1271calloc (nmemb, size)
1272     register __malloc_size_t nmemb;
1273     register __malloc_size_t size;
1274{
1275  register __ptr_t result = malloc (nmemb * size);
1276
1277  if (result != NULL)
1278    (void) memset (result, 0, nmemb * size);
1279
1280  return result;
1281}
1282/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1283This file is part of the GNU C Library.
1284
1285The GNU C Library is free software; you can redistribute it and/or modify
1286it under the terms of the GNU General Public License as published by
1287the Free Software Foundation; either version 2, or (at your option)
1288any later version.
1289
1290The GNU C Library is distributed in the hope that it will be useful,
1291but WITHOUT ANY WARRANTY; without even the implied warranty of
1292MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1293GNU General Public License for more details.
1294
1295You should have received a copy of the GNU General Public License
1296along with the GNU C Library; see the file COPYING.  If not, write to
1297the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1298
1299#ifndef _MALLOC_INTERNAL
1300#define _MALLOC_INTERNAL
1301#include <malloc.h>
1302#endif
1303
1304#ifndef __GNU_LIBRARY__
1305#define __sbrk  sbrk
1306#endif
1307
1308#ifdef __GNU_LIBRARY__
1309/* It is best not to declare this and cast its result on foreign operating
1310   systems with potentially hostile include files.  */
1311extern __ptr_t __sbrk __P ((int increment));
1312#endif
1313
1314#ifndef NULL
1315#define NULL 0
1316#endif
1317
1318/* Allocate INCREMENT more bytes of data space,
1319   and return the start of data space, or NULL on errors.
1320   If INCREMENT is negative, shrink data space.  */
/* Default __morecore implementation: grow (or, for negative
   INCREMENT, shrink) the data segment via sbrk and return its
   previous break, or NULL on failure.  */
__ptr_t
__default_morecore (increment)
#ifdef __STDC__
     ptrdiff_t increment;
#else
     int increment;
#endif
{
  /* NOTE(review): the cast narrows a ptrdiff_t INCREMENT to int;
     on platforms where ptrdiff_t is wider than int, requests beyond
     INT_MAX bytes would be truncated -- confirm __sbrk's parameter
     type on the target system.  */
  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
  /* sbrk signals failure with (void *) -1; map that to NULL, which
     is what the allocator's callers test for.  */
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
1334/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1335
1336This library is free software; you can redistribute it and/or
1337modify it under the terms of the GNU Library General Public License as
1338published by the Free Software Foundation; either version 2 of the
1339License, or (at your option) any later version.
1340
1341This library is distributed in the hope that it will be useful,
1342but WITHOUT ANY WARRANTY; without even the implied warranty of
1343MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1344Library General Public License for more details.
1345
1346You should have received a copy of the GNU Library General Public
1347License along with this library; see the file COPYING.LIB.  If
1348not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1349Cambridge, MA 02139, USA.  */
1350
1351#ifndef _MALLOC_INTERNAL
1352#define _MALLOC_INTERNAL
1353#include <malloc.h>
1354#endif
1355
1356__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1357
1358__ptr_t
1359memalign (alignment, size)
1360     __malloc_size_t alignment;
1361     __malloc_size_t size;
1362{
1363  __ptr_t result;
1364  unsigned long int adj;
1365
1366  if (__memalign_hook)
1367    return (*__memalign_hook) (alignment, size);
1368
1369  size = ((size + alignment - 1) / alignment) * alignment;
1370
1371  result = malloc (size);
1372  if (result == NULL)
1373    return NULL;
1374  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1375                                                  (char *) NULL)) % alignment;
1376  if (adj != 0)
1377    {
1378      struct alignlist *l;
1379      for (l = _aligned_blocks; l != NULL; l = l->next)
1380        if (l->aligned == NULL)
1381          /* This slot is free.  Use it.  */
1382          break;
1383      if (l == NULL)
1384        {
1385          l = (struct alignlist *) malloc (sizeof (struct alignlist));
1386          if (l == NULL)
1387            {
1388              free (result);
1389              return NULL;
1390            }
1391          l->next = _aligned_blocks;
1392          _aligned_blocks = l;
1393        }
1394      l->exact = result;
1395      result = l->aligned = (char *) result + alignment - adj;
1396    }
1397
1398  return result;
1399}
1400
1401#endif /* HAVE_GMALLOC */
Note: See TracBrowser for help on using the repository browser.