source: git/Singular/gmalloc.c @ 5f89eb

Last change on this file: revision 5f89eb, checked in by Olaf Bachmann <obachman@…>
* delete #ifdef HAVE_CONFIG_H -- we do not need it git-svn-id: file:///usr/local/Singular/svn/trunk@2974 2c84dea3-7e68-4137-9b89-c4e89433aadc
1/****************************************
2*  Computer Algebra System SINGULAR     *
3****************************************/
4/* $Id: */
5
6/* gmalloc is used by Singular to provide a trusted malloc and valloc.
7   It has been slightly edited to include mod2.h and to provide its
8   functionality only if HAVE_GMALLOC is defined.
9*/
10
11#include "mod2.h"
12
13#ifdef HAVE_GMALLOC
14
15
16#define _MALLOC_INTERNAL
17
18/* The malloc headers and source files from the C library follow here.  */
19
20/* Declarations for `malloc' and friends.
21   Copyright 1990, 1991, 1992, 1993, 1995 Free Software Foundation, Inc.
22                  Written May 1989 by Mike Haertel.
23
24This library is free software; you can redistribute it and/or
25modify it under the terms of the GNU Library General Public License as
26published by the Free Software Foundation; either version 2 of the
27License, or (at your option) any later version.
28
29This library is distributed in the hope that it will be useful,
30but WITHOUT ANY WARRANTY; without even the implied warranty of
31MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
32Library General Public License for more details.
33
34You should have received a copy of the GNU Library General Public
35License along with this library; see the file COPYING.LIB.  If
36not, write to the Free Software Foundation, Inc., 675 Mass Ave,
37Cambridge, MA 02139, USA.
38
39   The author may be reached (Email) at the address mike@ai.mit.edu,
40   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
41
42#ifndef _MALLOC_H
43
44#define _MALLOC_H       1
45
46#ifdef _MALLOC_INTERNAL
47
48#if     defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
49#include <string.h>
50#else
51#ifndef memset
52#define memset(s, zero, n)      bzero ((s), (n))
53#endif
54#ifndef memcpy
55#define memcpy(d, s, n)         bcopy ((s), (d), (n))
56#endif
57#endif
58
59#if     defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
60#include <limits.h>
61#else
62#ifndef CHAR_BIT
63#define CHAR_BIT        8
64#endif
65#endif
66
67#ifdef  HAVE_UNISTD_H
68#include <unistd.h>
69#endif
70
71#endif  /* _MALLOC_INTERNAL.  */
72
73
74#ifdef  __cplusplus
75extern "C"
76{
77#endif
78
79#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
80#undef  __P
81#define __P(args)       args
82#undef  __ptr_t
83#define __ptr_t         void *
84#else /* Not C++ or ANSI C.  */
85#undef  __P
86#define __P(args)       ()
87#undef  const
88#define const
89#undef  __ptr_t
90#define __ptr_t         char *
91#endif /* C++ or ANSI C.  */
92
93#if defined (__STDC__) && __STDC__
94#include <stddef.h>
95#define __malloc_size_t         size_t
96#define __malloc_ptrdiff_t      ptrdiff_t
97#else
98#define __malloc_size_t         unsigned int
99#define __malloc_ptrdiff_t      int
100#endif
101
102#ifndef NULL
103#define NULL    0
104#endif
105
106
107/* Allocate SIZE bytes of memory.  */
108extern __ptr_t malloc __P ((__malloc_size_t __size));
109/* Re-allocate the previously allocated block
110   in __ptr_t, making the new block SIZE bytes long.  */
111extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
112/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
113extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
114/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
115extern void free __P ((__ptr_t __ptr));
116
117/* Allocate SIZE bytes allocated to ALIGNMENT bytes.  */
118extern __ptr_t memalign __P ((__malloc_size_t __alignment,
119                              __malloc_size_t __size));
120
121/* Allocate SIZE bytes on a page boundary.  */
122extern __ptr_t valloc __P ((__malloc_size_t __size));
123
124
125#ifdef _MALLOC_INTERNAL
126
127/* The allocator divides the heap into blocks of fixed size; large
128   requests receive one or more whole blocks, and small requests
129   receive a fragment of a block.  Fragment sizes are powers of two,
130   and all fragments of a block are the same size.  When all the
131   fragments in a block have been freed, the block itself is freed.  */
132#define INT_BIT         (CHAR_BIT * sizeof(int))
133#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
134#define BLOCKSIZE       (1 << BLOCKLOG)
135#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
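/* Editor's note (worked example, not part of the original source), assuming
   a typical host with CHAR_BIT == 8 and a 4-byte int: then INT_BIT == 32,
   BLOCKLOG == 12 and BLOCKSIZE == 4096, so

       BLOCKIFY (10000)  ==  (10000 + 4095) / 4096  ==  3

   i.e. a 10000-byte request is served from 3 whole blocks, while any request
   of at most BLOCKSIZE / 2 == 2048 bytes is served from a power-of-two
   fragment of a single block.  On a 16-bit host BLOCKSIZE would be 512.  */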
136
137/* Determine the amount of memory spanned by the initial heap table
138   (not an absolute limit).  */
139#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)
140
141/* Number of contiguous free blocks allowed to build up at the end of
142   memory before they will be returned to the system.  */
143#define FINAL_FREE_BLOCKS       8
144
145/* Data structure giving per-block information.  */
146typedef union
147  {
148    /* Heap information for a busy block.  */
149    struct
150      {
151        /* Zero for a large (multiblock) object, or positive giving the
152           logarithm to the base two of the fragment size.  */
153        int type;
154        union
155          {
156            struct
157              {
158                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
159                __malloc_size_t first; /* First free fragment of the block.  */
160              } frag;
161            /* For a large object, in its first block, this has the number
162               of blocks in the object.  In the other blocks, this has a
163               negative number which says how far back the first block is.  */
164            __malloc_ptrdiff_t size;
165          } info;
166      } busy;
167    /* Heap information for a free block
168       (that may be the first of a free cluster).  */
169    struct
170      {
171        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
172        __malloc_size_t next;   /* Index of next free cluster.  */
173        __malloc_size_t prev;   /* Index of previous free cluster.  */
174      } free;
175  } malloc_info;
176
177/* Pointer to first block of the heap.  */
178extern char *_heapbase;
179
180/* Table indexed by block number giving per-block information.  */
181extern malloc_info *_heapinfo;
182
183/* Address to block number and vice versa.  */
184#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
185#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
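/* Editor's note (worked example, not part of the original source): block
   numbers are 1-based, and BLOCK works for any address inside a block
   because the division truncates.  Assuming BLOCKSIZE == 4096:

       BLOCK (_heapbase + 9000)  ==  9000 / 4096 + 1  ==  3
       ADDRESS (3)               ==  _heapbase + 2 * 4096

   so ADDRESS (BLOCK (p)) rounds p down to the start of its block.  */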
186
187/* Current search index for the heap table.  */
188extern __malloc_size_t _heapindex;
189
190/* Limit of valid info table indices.  */
191extern __malloc_size_t _heaplimit;
192
193/* Doubly linked lists of free fragments.  */
194struct list
195  {
196    struct list *next;
197    struct list *prev;
198  };
199
200/* Free list headers for each fragment size.  */
201extern struct list _fraghead[];
202
203/* List of blocks allocated with `memalign' (or `valloc').  */
204struct alignlist
205  {
206    struct alignlist *next;
207    __ptr_t aligned;            /* The address that memaligned returned.  */
208    __ptr_t exact;              /* The address that malloc returned.  */
209  };
210extern struct alignlist *_aligned_blocks;
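/* Editor's note (not part of the original source): `memalign' may return a
   pointer that lies inside, rather than at the start of, the region it got
   from `malloc'.  Each (exact, aligned) pair is recorded in this list so
   that `free', given only the aligned pointer, can look up and release the
   exact pointer malloc returned; see `free' and `memalign' below.  */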
211
212/* Instrumentation.  */
213extern __malloc_size_t _chunks_used;
214extern __malloc_size_t _bytes_used;
215extern __malloc_size_t _chunks_free;
216extern __malloc_size_t _bytes_free;
217
218/* Internal version of `free' used in `morecore' (malloc.c). */
219extern void _free_internal __P ((__ptr_t __ptr));
220
221#endif /* _MALLOC_INTERNAL.  */
222
223/* Given an address in the middle of a malloc'd object,
224   return the address of the beginning of the object.  */
225extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));
226
227/* Underlying allocation function; successive calls should
228   return contiguous pieces of memory.  */
229extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));
230
231/* Default value of `__morecore'.  */
232extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));
233
234/* If not NULL, this function is called after each time
235   `__morecore' is called to increase the data size.  */
236extern void (*__after_morecore_hook) __P ((void));
237
238/* Nonzero if `malloc' has been called and done its initialization.  */
239extern int __malloc_initialized;
240
241/* Hooks for debugging versions.  */
242extern void (*__malloc_initialize_hook) __P ((void));
243extern void (*__free_hook) __P ((__ptr_t __ptr));
244extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
245extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
246extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
247                                        __malloc_size_t __alignment));
248
249/* Return values for `mprobe': these are the kinds of inconsistencies that
250   `mcheck' enables detection of.  */
251enum mcheck_status
252  {
253    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
254    MCHECK_OK,                  /* Block is fine.  */
255    MCHECK_FREE,                /* Block freed twice.  */
256    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
257    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
258  };
259
260/* Activate a standard collection of debugging hooks.  This must be called
261   before `malloc' is ever called.  ABORTFUNC is called with an error code
262   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
263   null, the standard function prints on stderr and then calls `abort'.  */
264extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
265
266/* Check for aberrations in a particular malloc'd block.  You must have
267   called `mcheck' already.  These are the same checks that `mcheck' does
268   when you free or reallocate a block.  */
269extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
270
271/* Activate a standard collection of tracing hooks.  */
272extern void mtrace __P ((void));
273extern void muntrace __P ((void));
274
275/* Statistics available to the user.  */
276struct mstats
277  {
278    __malloc_size_t bytes_total; /* Total size of the heap. */
279    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
280    __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
281    __malloc_size_t chunks_free; /* Chunks in the free list. */
282    __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
283  };
284
285/* Pick up the current statistics. */
286extern struct mstats mstats __P ((void));
287
288/* Call WARNFUN with a warning message when memory usage is high.  */
289extern void memory_warnings __P ((__ptr_t __start,
290                                  void (*__warnfun) __P ((const char *))));
291
292
293/* Relocating allocator.  */
294
295/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
296extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
297
298/* Free the storage allocated in HANDLEPTR.  */
299extern void r_alloc_free __P ((__ptr_t *__handleptr));
300
301/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
302extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));
303
304
305#ifdef  __cplusplus
306}
307#endif
308
309#endif /* malloc.h  */
310/* Allocate memory on a page boundary.
311   Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
312
313This library is free software; you can redistribute it and/or
314modify it under the terms of the GNU Library General Public License as
315published by the Free Software Foundation; either version 2 of the
316License, or (at your option) any later version.
317
318This library is distributed in the hope that it will be useful,
319but WITHOUT ANY WARRANTY; without even the implied warranty of
320MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
321Library General Public License for more details.
322
323You should have received a copy of the GNU Library General Public
324License along with this library; see the file COPYING.LIB.  If
325not, write to the Free Software Foundation, Inc., 675 Mass Ave,
326Cambridge, MA 02139, USA.
327
328   The author may be reached (Email) at the address mike@ai.mit.edu,
329   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
330
331#if defined (__GNU_LIBRARY__) || defined (_LIBC)
332#include <stddef.h>
333#include <sys/cdefs.h>
334extern size_t __getpagesize __P ((void));
335#else
336#if 0 /* obachman: pasted in getpagesize.h manually */
337#include "getpagesize.h"
338#else
339
340#ifdef VMS
341#define getpagesize() 512
342#endif
343
344#ifdef HAVE_UNISTD_H
345#include <unistd.h>
346#endif
347
348#ifdef _SC_PAGESIZE
349#define getpagesize() sysconf(_SC_PAGESIZE)
350#else
351
352#include <sys/param.h>
353
354#ifdef EXEC_PAGESIZE
355#define getpagesize() EXEC_PAGESIZE
356#else
357#ifdef NBPG
358#define getpagesize() NBPG * CLSIZE
359#ifndef CLSIZE
360#define CLSIZE 1
361#endif /* no CLSIZE */
362#else /* no NBPG */
363#ifdef NBPC
364#define getpagesize() NBPC
365#else /* no NBPC */
366#ifdef PAGESIZE
367#define getpagesize() PAGESIZE
368#endif
369#endif /* NBPC */
370#endif /* no NBPG */
371#endif /* no EXEC_PAGESIZE */
372#endif /* no _SC_PAGESIZE */
373
374#define  __getpagesize()        getpagesize()
375#endif /* if 0 */
376#endif
377
378#ifndef _MALLOC_INTERNAL
379#define _MALLOC_INTERNAL
380#include <malloc.h>
381#endif
382
383static __malloc_size_t pagesize;
384
385__ptr_t
386valloc (size)
387     __malloc_size_t size;
388{
389  if (pagesize == 0)
390    pagesize = __getpagesize ();
391
392  return memalign (pagesize, size);
393}
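/* Editor's note (illustrative usage, not part of the original source):
   `valloc' is simply `memalign' with the system page size, so

       char *buf = (char *) valloc (8192);

   returns a pointer aligned to getpagesize () (commonly 4096) and the block
   is released with an ordinary free (buf).  */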
394/* Memory allocator `malloc'.
395   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
396                  Written May 1989 by Mike Haertel.
397
398This library is free software; you can redistribute it and/or
399modify it under the terms of the GNU Library General Public License as
400published by the Free Software Foundation; either version 2 of the
401License, or (at your option) any later version.
402
403This library is distributed in the hope that it will be useful,
404but WITHOUT ANY WARRANTY; without even the implied warranty of
405MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
406Library General Public License for more details.
407
408You should have received a copy of the GNU Library General Public
409License along with this library; see the file COPYING.LIB.  If
410not, write to the Free Software Foundation, Inc., 675 Mass Ave,
411Cambridge, MA 02139, USA.
412
413   The author may be reached (Email) at the address mike@ai.mit.edu,
414   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
415
416#ifndef _MALLOC_INTERNAL
417#define _MALLOC_INTERNAL
418#include <malloc.h>
419#endif
420
421/* How to really get more memory.  */
422__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
423
424/* Debugging hook for `malloc'.  */
425__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
426
427/* Pointer to the base of the first block.  */
428char *_heapbase;
429
430/* Block information table.  Allocated with align/__free (not malloc/free).  */
431malloc_info *_heapinfo;
432
433/* Number of info entries.  */
434static __malloc_size_t heapsize;
435
436/* Search index in the info table.  */
437__malloc_size_t _heapindex;
438
439/* Limit of valid info table indices.  */
440__malloc_size_t _heaplimit;
441
442/* Free lists for each fragment size.  */
443struct list _fraghead[BLOCKLOG];
444
445/* Instrumentation.  */
446__malloc_size_t _chunks_used;
447__malloc_size_t _bytes_used;
448__malloc_size_t _chunks_free;
449__malloc_size_t _bytes_free;
450
451/* Are you experienced?  */
452int __malloc_initialized;
453
454void (*__malloc_initialize_hook) __P ((void));
455void (*__after_morecore_hook) __P ((void));
456
457/* Aligned allocation.  */
458static __ptr_t align __P ((__malloc_size_t));
459static __ptr_t
460align (size)
461     __malloc_size_t size;
462{
463  __ptr_t result;
464  unsigned long int adj;
465
466  result = (*__morecore) (size);
467  adj = (unsigned long int) ((unsigned long int) ((char *) result -
468                                                  (char *) NULL)) % BLOCKSIZE;
469  if (adj != 0)
470    {
471      adj = BLOCKSIZE - adj;
472      (void) (*__morecore) (adj);
473      result = (char *) result + adj;
474    }
475
476  if (__after_morecore_hook)
477    (*__after_morecore_hook) ();
478
479  return result;
480}
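/* Editor's note (illustrative, not part of the original source): `align'
   makes the core obtained from (*__morecore) start on a BLOCKSIZE boundary.
   Assuming BLOCKSIZE == 4096 and the first call returns an address whose low
   bits are 0x100, then adj == 0x100, another 4096 - 0x100 bytes are
   requested, and the pointer is advanced by that amount, so the caller sees
   a block-aligned region covering the full requested size.  */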
481
482/* Set everything up and remember that we have.  */
483static int initialize __P ((void));
484static int
485initialize ()
486{
487  if (__malloc_initialize_hook)
488    (*__malloc_initialize_hook) ();
489
490  heapsize = HEAP / BLOCKSIZE;
491  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
492  if (_heapinfo == NULL)
493    return 0;
494  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
495  _heapinfo[0].free.size = 0;
496  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
497  _heapindex = 0;
498  _heapbase = (char *) _heapinfo;
499
500  /* Account for the _heapinfo block itself in the statistics.  */
501  _bytes_used = heapsize * sizeof (malloc_info);
502  _chunks_used = 1;
503
504  __malloc_initialized = 1;
505  return 1;
506}
507
508/* Get neatly aligned memory, initializing or
509   growing the heap info table as necessary. */
510static __ptr_t morecore __P ((__malloc_size_t));
511static __ptr_t
512morecore (size)
513     __malloc_size_t size;
514{
515  __ptr_t result;
516  malloc_info *newinfo, *oldinfo;
517  __malloc_size_t newsize;
518
519  result = align (size);
520  if (result == NULL)
521    return NULL;
522
523  /* Check if we need to grow the info table.  */
524  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
525    {
526      newsize = heapsize;
527      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
528        newsize *= 2;
529      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
530      if (newinfo == NULL)
531        {
532          (*__morecore) (-size);
533          return NULL;
534        }
535      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
536      memset (&newinfo[heapsize], 0,
537              (newsize - heapsize) * sizeof (malloc_info));
538      oldinfo = _heapinfo;
539      newinfo[BLOCK (oldinfo)].busy.type = 0;
540      newinfo[BLOCK (oldinfo)].busy.info.size
541        = BLOCKIFY (heapsize * sizeof (malloc_info));
542      _heapinfo = newinfo;
543      /* Account for the _heapinfo block itself in the statistics.  */
544      _bytes_used += newsize * sizeof (malloc_info);
545      ++_chunks_used;
546      _free_internal (oldinfo);
547      heapsize = newsize;
548    }
549
550  _heaplimit = BLOCK ((char *) result + size);
551  return result;
552}
553
554/* Allocate memory from the heap.  */
555__ptr_t
556malloc (size)
557     __malloc_size_t size;
558{
559  __ptr_t result;
560  __malloc_size_t block, blocks, lastblocks, start;
561  register __malloc_size_t i;
562  struct list *next;
563
564  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
565     valid address you can realloc and free (though not dereference).
566
567     It turns out that some extant code (sunrpc, at least Ultrix's version)
568     expects `malloc (0)' to return non-NULL and breaks otherwise.
569     Be compatible.  */
570
571#if     0
572  if (size == 0)
573    return NULL;
574#endif
575
576  if (__malloc_hook != NULL)
577    return (*__malloc_hook) (size);
578
579  if (!__malloc_initialized)
580    if (!initialize ())
581      return NULL;
582
583  if (size < sizeof (struct list))
584    size = sizeof (struct list);
585
586#ifdef SUNOS_LOCALTIME_BUG
587  if (size < 16)
588    size = 16;
589#endif
590
591  /* Determine the allocation policy based on the request size.  */
592  if (size <= BLOCKSIZE / 2)
593    {
594      /* Small allocation to receive a fragment of a block.
595         Determine the logarithm to base two of the fragment size. */
596      register __malloc_size_t log = 1;
597      --size;
598      while ((size /= 2) != 0)
599        ++log;
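      /* Editor's note (worked example, not part of the original source):
         for a 20-byte request the loop above yields log == 5, i.e. the
         smallest power of two not smaller than the request, so the caller
         gets a 32-byte fragment; with BLOCKSIZE == 4096 one block then
         holds 4096 >> 5 == 128 such fragments.  */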
600
601      /* Look in the fragment lists for a
602         free fragment of the desired size. */
603      next = _fraghead[log].next;
604      if (next != NULL)
605        {
606          /* There are free fragments of this size.
607             Pop a fragment out of the fragment list and return it.
608             Update the block's nfree and first counters. */
609          result = (__ptr_t) next;
610          next->prev->next = next->next;
611          if (next->next != NULL)
612            next->next->prev = next->prev;
613          block = BLOCK (result);
614          if (--_heapinfo[block].busy.info.frag.nfree != 0)
615            _heapinfo[block].busy.info.frag.first = (unsigned long int)
616              ((unsigned long int) ((char *) next->next - (char *) NULL)
617               % BLOCKSIZE) >> log;
618
619          /* Update the statistics.  */
620          ++_chunks_used;
621          _bytes_used += 1 << log;
622          --_chunks_free;
623          _bytes_free -= 1 << log;
624        }
625      else
626        {
627          /* No free fragments of the desired size, so get a new block
628             and break it into fragments, returning the first.  */
629          result = malloc (BLOCKSIZE);
630          if (result == NULL)
631            return NULL;
632
633          /* Link all fragments but the first into the free list.  */
634          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
635            {
636              next = (struct list *) ((char *) result + (i << log));
637              next->next = _fraghead[log].next;
638              next->prev = &_fraghead[log];
639              next->prev->next = next;
640              if (next->next != NULL)
641                next->next->prev = next;
642            }
643
644          /* Initialize the nfree and first counters for this block.  */
645          block = BLOCK (result);
646          _heapinfo[block].busy.type = log;
647          _heapinfo[block].busy.info.frag.nfree = i - 1;
648          _heapinfo[block].busy.info.frag.first = i - 1;
649
650          _chunks_free += (BLOCKSIZE >> log) - 1;
651          _bytes_free += BLOCKSIZE - (1 << log);
652          _bytes_used -= BLOCKSIZE - (1 << log);
653        }
654    }
655  else
656    {
657      /* Large allocation to receive one or more blocks.
658         Search the free list in a circle starting at the last place visited.
659         If we loop completely around without finding a large enough
660         space we will have to get more memory from the system.  */
661      blocks = BLOCKIFY (size);
662      start = block = _heapindex;
663      while (_heapinfo[block].free.size < blocks)
664        {
665          block = _heapinfo[block].free.next;
666          if (block == start)
667            {
668              /* Need to get more from the system.  Check to see if
669                 the new core will be contiguous with the final free
670                 block; if so we don't need to get as much.  */
671              block = _heapinfo[0].free.prev;
672              lastblocks = _heapinfo[block].free.size;
673              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
674                  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
675                  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
676                {
677                  /* Which block we are extending (the `final free
678                     block' referred to above) might have changed, if
679                     it got combined with a freed info table.  */
680                  block = _heapinfo[0].free.prev;
681                  _heapinfo[block].free.size += (blocks - lastblocks);
682                  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
683                  continue;
684                }
685              result = morecore (blocks * BLOCKSIZE);
686              if (result == NULL)
687                return NULL;
688              block = BLOCK (result);
689              _heapinfo[block].busy.type = 0;
690              _heapinfo[block].busy.info.size = blocks;
691              ++_chunks_used;
692              _bytes_used += blocks * BLOCKSIZE;
693              return result;
694            }
695        }
696
697      /* At this point we have found a suitable free list entry.
698         Figure out how to remove what we need from the list. */
699      result = ADDRESS (block);
700      if (_heapinfo[block].free.size > blocks)
701        {
702          /* The block we found has a bit left over,
703             so relink the tail end back into the free list. */
704          _heapinfo[block + blocks].free.size
705            = _heapinfo[block].free.size - blocks;
706          _heapinfo[block + blocks].free.next
707            = _heapinfo[block].free.next;
708          _heapinfo[block + blocks].free.prev
709            = _heapinfo[block].free.prev;
710          _heapinfo[_heapinfo[block].free.prev].free.next
711            = _heapinfo[_heapinfo[block].free.next].free.prev
712            = _heapindex = block + blocks;
713        }
714      else
715        {
716          /* The block exactly matches our requirements,
717             so just remove it from the list. */
718          _heapinfo[_heapinfo[block].free.next].free.prev
719            = _heapinfo[block].free.prev;
720          _heapinfo[_heapinfo[block].free.prev].free.next
721            = _heapindex = _heapinfo[block].free.next;
722          --_chunks_free;
723        }
724
725      _heapinfo[block].busy.type = 0;
726      _heapinfo[block].busy.info.size = blocks;
727      ++_chunks_used;
728      _bytes_used += blocks * BLOCKSIZE;
729      _bytes_free -= blocks * BLOCKSIZE;
730
731      /* Mark all the blocks of the object just allocated except for the
732         first with a negative number so you can find the first block by
733         adding that adjustment.  */
734      while (--blocks > 0)
735        _heapinfo[block + blocks].busy.info.size = -blocks;
736    }
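  /* Editor's note (worked example, not part of the original source): for a
     3-block object starting at block B the loop above leaves

         _heapinfo[B].busy.info.size     ==  3
         _heapinfo[B + 1].busy.info.size == -1
         _heapinfo[B + 2].busy.info.size == -2

     so the first block of an object can be found from any of its blocks by
     adding the (negative) stored value to the block number.  */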
737
738  return result;
739}
740
741#ifndef _LIBC
742
743/* On some ANSI C systems, some libc functions call _malloc, _free
744   and _realloc.  Make them use the GNU functions.  */
745
746__ptr_t
747_malloc (size)
748     __malloc_size_t size;
749{
750  return malloc (size);
751}
752
753void
754_free (ptr)
755     __ptr_t ptr;
756{
757  free (ptr);
758}
759
760__ptr_t
761_realloc (ptr, size)
762     __ptr_t ptr;
763     __malloc_size_t size;
764{
765  return realloc (ptr, size);
766}
767
768#endif
769/* Free a block of memory allocated by `malloc'.
770   Copyright 1990, 1991, 1992, 1994 Free Software Foundation, Inc.
771                  Written May 1989 by Mike Haertel.
772
773This library is free software; you can redistribute it and/or
774modify it under the terms of the GNU Library General Public License as
775published by the Free Software Foundation; either version 2 of the
776License, or (at your option) any later version.
777
778This library is distributed in the hope that it will be useful,
779but WITHOUT ANY WARRANTY; without even the implied warranty of
780MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
781Library General Public License for more details.
782
783You should have received a copy of the GNU Library General Public
784License along with this library; see the file COPYING.LIB.  If
785not, write to the Free Software Foundation, Inc., 675 Mass Ave,
786Cambridge, MA 02139, USA.
787
788   The author may be reached (Email) at the address mike@ai.mit.edu,
789   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
790
791#ifndef _MALLOC_INTERNAL
792#define _MALLOC_INTERNAL
793#include <malloc.h>
794#endif
795
796/* Debugging hook for free.  */
797void (*__free_hook) __P ((__ptr_t __ptr));
798
799/* List of blocks allocated by memalign.  */
800struct alignlist *_aligned_blocks = NULL;
801
802/* Return memory to the heap.
803   Like `free' but don't call a __free_hook if there is one.  */
804void
805_free_internal (ptr)
806     __ptr_t ptr;
807{
808  int type;
809  __malloc_size_t block, blocks;
810  register __malloc_size_t i;
811  struct list *prev, *next;
812
813  block = BLOCK (ptr);
814
815  type = _heapinfo[block].busy.type;
816  switch (type)
817    {
818    case 0:
819      /* Get as many statistics as early as we can.  */
820      --_chunks_used;
821      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
822      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
823
824      /* Find the free cluster previous to this one in the free list.
825         Start searching at the last block referenced; this may benefit
826         programs with locality of allocation.  */
827      i = _heapindex;
828      if (i > block)
829        while (i > block)
830          i = _heapinfo[i].free.prev;
831      else
832        {
833          do
834            i = _heapinfo[i].free.next;
835          while (i > 0 && i < block);
836          i = _heapinfo[i].free.prev;
837        }
838
839      /* Determine how to link this block into the free list.  */
840      if (block == i + _heapinfo[i].free.size)
841        {
842          /* Coalesce this block with its predecessor.  */
843          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
844          block = i;
845        }
846      else
847        {
848          /* Really link this block back into the free list.  */
849          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
850          _heapinfo[block].free.next = _heapinfo[i].free.next;
851          _heapinfo[block].free.prev = i;
852          _heapinfo[i].free.next = block;
853          _heapinfo[_heapinfo[block].free.next].free.prev = block;
854          ++_chunks_free;
855        }
856
857      /* Now that the block is linked in, see if we can coalesce it
858         with its successor (by deleting its successor from the list
859         and adding in its size).  */
860      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
861        {
862          _heapinfo[block].free.size
863            += _heapinfo[_heapinfo[block].free.next].free.size;
864          _heapinfo[block].free.next
865            = _heapinfo[_heapinfo[block].free.next].free.next;
866          _heapinfo[_heapinfo[block].free.next].free.prev = block;
867          --_chunks_free;
868        }
869
870      /* Now see if we can return stuff to the system.  */
871      blocks = _heapinfo[block].free.size;
872      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
873          && (*__morecore) (0) == ADDRESS (block + blocks))
874        {
875          register __malloc_size_t bytes = blocks * BLOCKSIZE;
876          _heaplimit -= blocks;
877          (*__morecore) (-bytes);
878          _heapinfo[_heapinfo[block].free.prev].free.next
879            = _heapinfo[block].free.next;
880          _heapinfo[_heapinfo[block].free.next].free.prev
881            = _heapinfo[block].free.prev;
882          block = _heapinfo[block].free.prev;
883          --_chunks_free;
884          _bytes_free -= bytes;
885        }
886
887      /* Set the next search to begin at this block.  */
888      _heapindex = block;
889      break;
890
891    default:
892      /* Do some of the statistics.  */
893      --_chunks_used;
894      _bytes_used -= 1 << type;
895      ++_chunks_free;
896      _bytes_free += 1 << type;
897
898      /* Get the address of the first free fragment in this block.  */
899      prev = (struct list *) ((char *) ADDRESS (block) +
900                           (_heapinfo[block].busy.info.frag.first << type));
901
902      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
903        {
904          /* If all fragments of this block are free, remove them
905             from the fragment list and free the whole block.  */
906          next = prev;
907          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
908            next = next->next;
909          prev->prev->next = next;
910          if (next != NULL)
911            next->prev = prev->prev;
912          _heapinfo[block].busy.type = 0;
913          _heapinfo[block].busy.info.size = 1;
914
915          /* Keep the statistics accurate.  */
916          ++_chunks_used;
917          _bytes_used += BLOCKSIZE;
918          _chunks_free -= BLOCKSIZE >> type;
919          _bytes_free -= BLOCKSIZE;
920
921          free (ADDRESS (block));
922        }
923      else if (_heapinfo[block].busy.info.frag.nfree != 0)
924        {
925          /* If some fragments of this block are free, link this
926             fragment into the fragment list after the first free
927             fragment of this block. */
928          next = (struct list *) ptr;
929          next->next = prev->next;
930          next->prev = prev;
931          prev->next = next;
932          if (next->next != NULL)
933            next->next->prev = next;
934          ++_heapinfo[block].busy.info.frag.nfree;
935        }
936      else
937        {
938          /* No fragments of this block are free, so link this
939             fragment into the fragment list and announce that
940             it is the first free fragment of this block. */
941          prev = (struct list *) ptr;
942          _heapinfo[block].busy.info.frag.nfree = 1;
943          _heapinfo[block].busy.info.frag.first = (unsigned long int)
944            ((unsigned long int) ((char *) ptr - (char *) NULL)
945             % BLOCKSIZE >> type);
946          prev->next = _fraghead[type].next;
947          prev->prev = &_fraghead[type];
948          prev->prev->next = prev;
949          if (prev->next != NULL)
950            prev->next->prev = prev;
951        }
952      break;
953    }
954}
955
956/* Return memory to the heap.  */
957void
958free (ptr)
959     __ptr_t ptr;
960{
961  register struct alignlist *l;
962
963  if (ptr == NULL)
964    return;
965
966  for (l = _aligned_blocks; l != NULL; l = l->next)
967    if (l->aligned == ptr)
968      {
969        l->aligned = NULL;      /* Mark the slot in the list as free.  */
970        ptr = l->exact;
971        break;
972      }
973
974  if (__free_hook != NULL)
975    (*__free_hook) (ptr);
976  else
977    _free_internal (ptr);
978}
979/* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
980This file is part of the GNU C Library.
981
982The GNU C Library is free software; you can redistribute it and/or
983modify it under the terms of the GNU Library General Public License as
984published by the Free Software Foundation; either version 2 of the
985License, or (at your option) any later version.
986
987The GNU C Library is distributed in the hope that it will be useful,
988but WITHOUT ANY WARRANTY; without even the implied warranty of
989MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
990Library General Public License for more details.
991
992You should have received a copy of the GNU Library General Public
993License along with the GNU C Library; see the file COPYING.LIB.  If
994not, write to the Free Software Foundation, Inc., 675 Mass Ave,
995Cambridge, MA 02139, USA.  */
996
997#ifndef _MALLOC_INTERNAL
998#define _MALLOC_INTERNAL
999#include <malloc.h>
1000#endif
1001
1002#ifdef _LIBC
1003
1004#include <ansidecl.h>
1005#include <gnu-stabs.h>
1006
1007#undef  cfree
1008
1009function_alias(cfree, free, void, (ptr),
1010               DEFUN(cfree, (ptr), PTR ptr))
1011
1012#else
1013
1014void
1015cfree (ptr)
1016     __ptr_t ptr;
1017{
1018  free (ptr);
1019}
1020
1021#endif
1022/* Change the size of a block allocated by `malloc'.
1023   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1024                     Written May 1989 by Mike Haertel.
1025
1026This library is free software; you can redistribute it and/or
1027modify it under the terms of the GNU Library General Public License as
1028published by the Free Software Foundation; either version 2 of the
1029License, or (at your option) any later version.
1030
1031This library is distributed in the hope that it will be useful,
1032but WITHOUT ANY WARRANTY; without even the implied warranty of
1033MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1034Library General Public License for more details.
1035
1036You should have received a copy of the GNU Library General Public
1037License along with this library; see the file COPYING.LIB.  If
1038not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1039Cambridge, MA 02139, USA.
1040
1041   The author may be reached (Email) at the address mike@ai.mit.edu,
1042   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1043
1044#ifndef _MALLOC_INTERNAL
1045#define _MALLOC_INTERNAL
1046#include <malloc.h>
1047#endif
1048
1049#if  (defined (MEMMOVE_MISSING) || \
1050      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1051
1052/* Snarfed directly from Emacs src/dispnew.c:
1053   XXX Should use system bcopy if it handles overlap.  */
1054#ifndef emacs
1055
1056/* Like bcopy except never gets confused by overlap.  */
1057
1058static void
1059safe_bcopy (from, to, size)
1060     char *from, *to;
1061     int size;
1062{
1063  if (size <= 0 || from == to)
1064    return;
1065
1066  /* If the source and destination don't overlap, then bcopy can
1067     handle it.  If they do overlap, but the destination is lower in
1068     memory than the source, we'll assume bcopy can handle that.  */
1069  if (to < from || from + size <= to)
1070    bcopy (from, to, size);
1071
1072  /* Otherwise, we'll copy from the end.  */
1073  else
1074    {
1075      register char *endf = from + size;
1076      register char *endt = to + size;
1077
1078      /* If TO - FROM is large, then we should break the copy into
1079         nonoverlapping chunks of TO - FROM bytes each.  However, if
1080         TO - FROM is small, then the bcopy function call overhead
1081         makes this not worth it.  The crossover point could be about
1082         anywhere.  Since I don't think the obvious copy loop is too
1083         bad, I'm trying to err in its favor.  */
1084      if (to - from < 64)
1085        {
1086          do
1087            *--endt = *--endf;
1088          while (endf != from);
1089        }
1090      else
1091        {
1092          for (;;)
1093            {
1094              endt -= (to - from);
1095              endf -= (to - from);
1096
1097              if (endt < to)
1098                break;
1099
1100              bcopy (endf, endt, to - from);
1101            }
1102
1103          /* If SIZE wasn't a multiple of TO - FROM, there will be a
1104             little left over.  The amount left over is
1105             (endt + (to - from)) - to, which is endt - from.  */
1106          bcopy (from, to, endt - from);
1107        }
1108    }
1109}     
1110#endif  /* Not emacs.  */
1111
1112#define memmove(to, from, size) safe_bcopy ((from), (to), (size))
1113
1114#endif
1115
1116
1117#define min(A, B) ((A) < (B) ? (A) : (B))
1118
1119/* Debugging hook for realloc.  */
1120__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1121
1122/* Resize the given region to the new size, returning a pointer
1123   to the (possibly moved) region.  This is optimized for speed;
1124   some benchmarks seem to indicate that greater compactness is
1125   achieved by unconditionally allocating and copying to a
1126   new region.  This module has incestuous knowledge of the
1127   internals of both free and malloc. */
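/* Editor's note (illustrative summary, not part of the original source) of
   the cases handled below:

       realloc (p, 0)     frees p and behaves like malloc (0);
       realloc (NULL, n)  behaves like malloc (n);
       shrinking a large (multi-block) object splits it in place and frees
         the tail; growing one frees it first and re-allocates, copying with
         memmove if the data had to move;
       a fragment stays in place as long as the new size still rounds to the
         same power-of-two fragment size.  */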
1128__ptr_t
1129realloc (ptr, size)
1130     __ptr_t ptr;
1131     __malloc_size_t size;
1132{
1133  __ptr_t result;
1134  int type;
1135  __malloc_size_t block, blocks, oldlimit;
1136
1137  if (size == 0)
1138    {
1139      free (ptr);
1140      return malloc (0);
1141    }
1142  else if (ptr == NULL)
1143    return malloc (size);
1144
1145  if (__realloc_hook != NULL)
1146    return (*__realloc_hook) (ptr, size);
1147
1148  block = BLOCK (ptr);
1149
1150  type = _heapinfo[block].busy.type;
1151  switch (type)
1152    {
1153    case 0:
1154      /* Maybe reallocate a large block to a small fragment.  */
1155      if (size <= BLOCKSIZE / 2)
1156        {
1157          result = malloc (size);
1158          if (result != NULL)
1159            {
1160              memcpy (result, ptr, size);
1161              _free_internal (ptr);
1162              return result;
1163            }
1164        }
1165
1166      /* The new size is a large allocation as well;
1167         see if we can hold it in place. */
1168      blocks = BLOCKIFY (size);
1169      if (blocks < _heapinfo[block].busy.info.size)
1170        {
1171          /* The new size is smaller; return
1172             excess memory to the free list. */
1173          _heapinfo[block + blocks].busy.type = 0;
1174          _heapinfo[block + blocks].busy.info.size
1175            = _heapinfo[block].busy.info.size - blocks;
1176          _heapinfo[block].busy.info.size = blocks;
1177          /* We have just created a new chunk by splitting a chunk in two.
1178             Now we will free this chunk; increment the statistics counter
1179             so it doesn't become wrong when _free_internal decrements it.  */
1180          ++_chunks_used;
1181          _free_internal (ADDRESS (block + blocks));
1182          result = ptr;
1183        }
1184      else if (blocks == _heapinfo[block].busy.info.size)
1185        /* No size change necessary.  */
1186        result = ptr;
1187      else
1188        {
1189          /* Won't fit, so allocate a new region that will.
1190             Free the old region first in case there is sufficient
1191             adjacent free space to grow without moving. */
1192          blocks = _heapinfo[block].busy.info.size;
1193          /* Prevent free from actually returning memory to the system.  */
1194          oldlimit = _heaplimit;
1195          _heaplimit = 0;
1196          _free_internal (ptr);
1197          _heaplimit = oldlimit;
1198          result = malloc (size);
1199          if (result == NULL)
1200            {
1201              /* Now we're really in trouble.  We have to unfree
1202                 the thing we just freed.  Unfortunately it might
1203                 have been coalesced with its neighbors.  */
1204              if (_heapindex == block)
1205                (void) malloc (blocks * BLOCKSIZE);
1206              else
1207                {
1208                  __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1209                  (void) malloc (blocks * BLOCKSIZE);
1210                  _free_internal (previous);
1211                }
1212              return NULL;
1213            }
1214          if (ptr != result)
1215            memmove (result, ptr, blocks * BLOCKSIZE);
1216        }
1217      break;
1218
1219    default:
1220      /* Old size is a fragment; type is logarithm
1221         to base two of the fragment size.  */
1222      if (size > (__malloc_size_t) (1 << (type - 1)) &&
1223          size <= (__malloc_size_t) (1 << type))
1224        /* The new size is the same kind of fragment.  */
1225        result = ptr;
1226      else
1227        {
1228          /* The new size is different; allocate a new space,
1229             and copy the lesser of the new size and the old. */
1230          result = malloc (size);
1231          if (result == NULL)
1232            return NULL;
1233          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1234          free (ptr);
1235        }
1236      break;
1237    }
1238
1239  return result;
1240}
1241/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1242
1243This library is free software; you can redistribute it and/or
1244modify it under the terms of the GNU Library General Public License as
1245published by the Free Software Foundation; either version 2 of the
1246License, or (at your option) any later version.
1247
1248This library is distributed in the hope that it will be useful,
1249but WITHOUT ANY WARRANTY; without even the implied warranty of
1250MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1251Library General Public License for more details.
1252
1253You should have received a copy of the GNU Library General Public
1254License along with this library; see the file COPYING.LIB.  If
1255not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1256Cambridge, MA 02139, USA.
1257
1258   The author may be reached (Email) at the address mike@ai.mit.edu,
1259   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
1260
1261#ifndef _MALLOC_INTERNAL
1262#define _MALLOC_INTERNAL
1263#include <malloc.h>
1264#endif
1265
1266/* Allocate an array of NMEMB elements each SIZE bytes long.
1267   The entire array is initialized to zeros.  */
1268__ptr_t
1269calloc (nmemb, size)
1270     register __malloc_size_t nmemb;
1271     register __malloc_size_t size;
1272{
1273  register __ptr_t result = malloc (nmemb * size);
1274
1275  if (result != NULL)
1276    (void) memset (result, 0, nmemb * size);
1277
1278  return result;
1279}
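/* Editor's note (not part of the original source): this classic
   implementation multiplies nmemb * size without checking for overflow, so
   very large arguments can wrap around and yield a block smaller than
   intended; callers should validate the product themselves.  */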
1280/* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1281This file is part of the GNU C Library.
1282
1283The GNU C Library is free software; you can redistribute it and/or modify
1284it under the terms of the GNU General Public License as published by
1285the Free Software Foundation; either version 2, or (at your option)
1286any later version.
1287
1288The GNU C Library is distributed in the hope that it will be useful,
1289but WITHOUT ANY WARRANTY; without even the implied warranty of
1290MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1291GNU General Public License for more details.
1292
1293You should have received a copy of the GNU General Public License
1294along with the GNU C Library; see the file COPYING.  If not, write to
1295the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
1296
1297#ifndef _MALLOC_INTERNAL
1298#define _MALLOC_INTERNAL
1299#include <malloc.h>
1300#endif
1301
1302#ifndef __GNU_LIBRARY__
1303#define __sbrk  sbrk
1304#endif
1305
1306#ifdef __GNU_LIBRARY__
1307/* It is best not to declare this and cast its result on foreign operating
1308   systems with potentially hostile include files.  */
1309extern __ptr_t __sbrk __P ((int increment));
1310#endif
1311
1312#ifndef NULL
1313#define NULL 0
1314#endif
1315
1316/* Allocate INCREMENT more bytes of data space,
1317   and return the start of data space, or NULL on errors.
1318   If INCREMENT is negative, shrink data space.  */
1319__ptr_t
1320__default_morecore (increment)
1321#ifdef __STDC__
1322     ptrdiff_t increment;
1323#else
1324     int increment;
1325#endif
1326{
1327  __ptr_t result = (__ptr_t) __sbrk ((int) increment);
1328  if (result == (__ptr_t) -1)
1329    return NULL;
1330  return result;
1331}
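/* Editor's note (illustrative, not part of the original source): this is the
   default value of the `__morecore' hook declared above.  A call such as

       __ptr_t p = __default_morecore (8192);

   grows the data segment by 8192 bytes via sbrk and returns the old break,
   while a negative increment shrinks it; sbrk's failure value of -1 is
   translated into NULL.  */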
1332/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1333
1334This library is free software; you can redistribute it and/or
1335modify it under the terms of the GNU Library General Public License as
1336published by the Free Software Foundation; either version 2 of the
1337License, or (at your option) any later version.
1338
1339This library is distributed in the hope that it will be useful,
1340but WITHOUT ANY WARRANTY; without even the implied warranty of
1341MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1342Library General Public License for more details.
1343
1344You should have received a copy of the GNU Library General Public
1345License along with this library; see the file COPYING.LIB.  If
1346not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1347Cambridge, MA 02139, USA.  */
1348
1349#ifndef _MALLOC_INTERNAL
1350#define _MALLOC_INTERNAL
1351#include <malloc.h>
1352#endif
1353
1354__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));
1355
1356__ptr_t
1357memalign (alignment, size)
1358     __malloc_size_t alignment;
1359     __malloc_size_t size;
1360{
1361  __ptr_t result;
1362  unsigned long int adj;
1363
1364  if (__memalign_hook)
1365    return (*__memalign_hook) (alignment, size);
1366
1367  size = ((size + alignment - 1) / alignment) * alignment;
1368
1369  result = malloc (size);
1370  if (result == NULL)
1371    return NULL;
1372  adj = (unsigned long int) ((unsigned long int) ((char *) result -
1373                                                  (char *) NULL)) % alignment;
1374  if (adj != 0)
1375    {
1376      struct alignlist *l;
1377      for (l = _aligned_blocks; l != NULL; l = l->next)
1378        if (l->aligned == NULL)
1379          /* This slot is free.  Use it.  */
1380          break;
1381      if (l == NULL)
1382        {
1383          l = (struct alignlist *) malloc (sizeof (struct alignlist));
1384          if (l == NULL)
1385            {
1386              free (result);
1387              return NULL;
1388            }
1389          l->next = _aligned_blocks;
1390          _aligned_blocks = l;
1391        }
1392      l->exact = result;
1393      result = l->aligned = (char *) result + alignment - adj;
1394    }
1395
1396  return result;
1397}
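/* Editor's note (illustrative, not part of the original source), assuming
   BLOCKSIZE == 4096: the request size is first rounded up to a multiple of
   the alignment.  For memalign (8192, 100) the underlying malloc (8192)
   returns a block-aligned (4096-byte aligned) address; if that address is
   not already 8192-aligned, adj == 4096, the result is advanced by
   8192 - 4096 bytes, and the (exact, aligned) pair is recorded in
   _aligned_blocks so that a later free (aligned) releases the original
   malloc'd region.  */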
1398
1399#endif /* HAVE_GMALLOC */