- Timestamp:
- Feb 15, 2019, 11:51:03 AM
- Branches:
- fieker-DuVal (117eb8c30fc9e991c4decca4832b1d19036c4c65)
- spielwiese (c5facdfddea2addfd91babd8b9019161dea4b695)
- Children:
- 1a8dbfb80bfd40214596b9b7d6461ddaf34bbbb9
- Parents:
- 73bf78cfb1c82e93496a5857586fa0b8ee97ecfa
- Location:
- ppcc
- Files:
- 8 edited
ppcc/.clang-format
r73bf78 → r1edbe2f: whitespace-only change. Trailing spaces are removed after the map-valued keys BraceWrapping: (line 23), ForEachMacros: (line 58), IncludeCategories: (line 62), and RawStringFormats: (line 92). The surrounding option lines (BinPackArguments/BinPackParameters, the AfterClass/AfterControlStatement brace-wrapping flags, the llvm/clang include-category regex, the penalty and PointerAlignment settings, the TextProto raw-string delimiter) are unchanged context.
ppcc/Singular/include.patch
r73bf78 → r1edbe2f: the patch gains new hunks that insert #include "globaldefs.h" (or the relative form #include "../Singular/globaldefs.h") near the top of additional sources and headers:

- Singular/libparse.cc and Singular/scanner.cc: after #include <stdio.h>, before the cfront c_plusplus/__cplusplus compatibility block
- Singular/svd_si.h: after the <stdlib.h>/<math.h> includes, near resources/feFopen.h and kernel/mod2.h
- factory/cf_defs.h: inside the INCL_CF_DEFS_H guard, before the commented-out config.h include
- factory/cf_globals.h: inside the INCL_CF_GLOBALS_H guard
- factory/cf_switches.cc: also reorders the includes so cf_switches.h comes after cf_defs.h
- one further factory source (its name is not visible in this excerpt): before #include "config.h"
- factory/debug.cc: before STATIC_VAR int deb_level = -1;
- gfanlib's circuit-table header: inside the GFANLIB_CIRCUITTABLEINT_H_ guard
- a libpolys header: after #include "libpolysconfig.h"
- a file near sirandom (name not visible): additionally gains #include "misc/auxiliary.h" before sirandom.h
- resources/feResource.h: inside the #ifdef __cplusplus block
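For context: globaldefs.h is the header ppcc pushes into every translation unit so that the global-state markers are in scope before any global definitions (the STATIC_VAR/VAR spellings are visible in the debug.cc hunk above, and GLOBAL_VAR appears in stitch.patch below). A minimal sketch of such a header follows — the exact macro set is an assumption; only STATIC_VAR, VAR, and GLOBAL_VAR are confirmed by these diffs:

    /* globaldefs.h sketch (hypothetical beyond the three confirmed
     * macros). In a plain build the markers expand to ordinary C
     * storage classes; a PSINGULAR build could redefine them so the
     * ppcc preprocessor can rewrite globals into per-process storage. */
    #ifndef GLOBALDEFS_H
    #define GLOBALDEFS_H

    #ifndef PSINGULAR
    #define VAR                  /* mutable global definition */
    #define GLOBAL_VAR           /* same, for program-wide singletons */
    #define STATIC_VAR static    /* file-local mutable global */
    #endif

    #endif /* GLOBALDEFS_H */

    /* usage, as in the factory/debug.cc hunk: */
    STATIC_VAR int deb_level = -1;
    VAR char *deb_level_msg = (char *)"";

Because the markers must precede every global definition, the patch inserts the include immediately after the system headers in each file, which is exactly what the hunks above do.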
ppcc/Singular/stitch.patch
r73bf78 → r1edbe2f: the patch is reworked in several areas:

- blackbox table handling (MAX_BB_TYPES = 256, near the BLACKBOX_DEVEL define): the old static blackboxTable[]/blackboxName[] declarations are replaced, and the free-slot search is reformatted — int where = -1; followed by a for (int i=0;i<MAX_BB_TYPES;i++) scan in place of the old "second try, find empty slot from removed bb" logic.
- the hunk at iiAddCprocTop / the #ifdef HAVE_DYNAMIC_LOADING region grows from 7 to 37 lines (@@ -1087,7 +1087,37 @@).
- Singular/tesths.cc: next to extern int siInit(char *);, the patch now declares GLOBAL_VAR char *global_argv0; under #ifdef PSINGULAR, and after the omInitRet_2_Info(argv[0]) / omInitGetBackTrace() calls in startup it stores global_argv0 = argv[0]; under the same guard.
- the dynamic-loading shim gains a matching #ifdef PSINGULAR ... #endif block just before void *dynl_sym(void *handle, const char *symbol).
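The global_argv0 hunks follow a familiar pattern: capture the program path once at startup so later code (here, presumably the PSINGULAR dynamic-loading path) can resolve things relative to the executable. A minimal standalone sketch, assuming nothing beyond what the hunks show (the GLOBAL_VAR marker and the assignment in main):

    #include <stdio.h>

    /* GLOBAL_VAR comes from globaldefs.h in this tree; for this
     * sketch we assume it expands to an ordinary definition. */
    #define GLOBAL_VAR
    #define PSINGULAR 1          /* pretend this is a PSingular build */

    #ifdef PSINGULAR
    GLOBAL_VAR char *global_argv0;
    #endif

    int main(int argc, char **argv)
    {
        (void)argc;
    #ifdef PSINGULAR
        global_argv0 = argv[0];  /* as in the tesths.cc hunk */
    #endif
        printf("running as %s\n", argv[0]);
        return 0;
    }

Capturing argv[0] in main (rather than passing it down through every call) keeps the patch small, at the cost of one more global — which is presumably why it is declared with the GLOBAL_VAR marker.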
ppcc/auto.def
r73bf78 → r1edbe2f: whitespace-only change; a trailing space is removed from the line make-config-header adlib/config-defs.h -auto USE_* (line 386). The adjacent make-template lines for cnf/config.h.in and cnf/Makefile.in are unchanged context.
ppcc/autosetup/adlib.patch
r73bf78 → r1edbe2f: the context-diff hunks against autosetup are updated:

- the "Build the command line" hunk (around lines 505-511) shifts context only.
- CFLAGS loses its built-in default: the old hunk initialized foreach i {LDFLAGS LIBS CPPFLAGS LINKFLAGS {CFLAGS "-g -O2"}}; the new hunk (lines 661-667) initializes plain CFLAGS with no "-g -O2" fallback.
- compiler discovery is reordered: CXX is now searched as [get-define cross]g++, then clang++, then c++ (previously c++ then g++), and CC_FOR_BUILD as gcc, clang, cc (previously cc, gcc). The CXXFLAGS-defaults-to-CFLAGS logic and the "Could not find a C compiler" error path are unchanged.
- the "C compiler..." msg-result no longer prints [get-define CCACHE] before [get-define CC] [get-define CFLAGS]; the CXX and "Build C compiler..." result lines are unchanged.
ppcc/gclib/dlmalloc.c
r73bf78 → r1edbe2f: whitespace-only cleanup across the whole file; every hunk pairs an old line that ended in trailing spaces with an otherwise identical new line. The affected regions span the long header comment (the pointer/size_t representation notes, maximum allocated size, the Single Unix Specification compliance paragraph, the HAVE_MMAP/MORECORE option tables, and the documentation of realloc, mallopt, mallinfo, independent_calloc/independent_comalloc, cfree, and malloc_trim), the configuration macros (USE_DL_PREFIX, MALLOC_FAILURE_ACTION, MMAP_AS_MORECORE_SIZE, malloc_getpagesize, M_MXFAST, the trim-threshold discussion), the internal machinery (REQUEST_OUT_OF_RANGE/request2size, the bin, fastbin, and binmap comments, malloc_state and malloc_init_state, the debug check_* routines), the allocation paths (sYSMALLOc, sYSTRIm, mALLOc, fREe, malloc_consolidate, the realloc and mremap handling, iALLOc, vALLOc/pvALLOc, mTRIm, mSTATs), the "Alternative MORECORE functions" notes, the win32 sbrk/mmap/munmap emulation and cpuinfo helpers donated by J. Walter, and the version-history comment. No functional changes.
ppcc/src/Makefile.extra
r73bf78 → r1edbe2f: whitespace-only change; trailing spaces are removed from the two target lines src/pplex.h: src/pplex.h.in (line 4) and src/pplex.re: src/pplex.re.in (line 6). The $(PREPROC_TCL) recipes and the CONFIGITEMS += line are unchanged context.
ppcc/src/pplex.cc
r73bf78 → r1edbe2f: whitespace-only change in the re2c-generated scanner; trailing spaces are removed from the otherwise blank lines before the two #line directives (near line 90, after const char *last = cursor; in the while (!done) loop, and near line 2126, after the comment: label).