comparison src/stdlib/SDL_malloc.c @ 1668:4da1ee79c9af SDL-1.3

more tweaking indent options
author Sam Lantinga <slouken@libsdl.org>
date Mon, 29 May 2006 04:04:35 +0000
parents 782fd950bd46
children
1667:1fddae038bc8 1668:4da1ee79c9af
716 arguments that would be negative if signed are interpreted as 716 arguments that would be negative if signed are interpreted as
717 requests for huge amounts of space, which will often fail. The 717 requests for huge amounts of space, which will often fail. The
718 maximum supported value of n differs across systems, but is in all 718 maximum supported value of n differs across systems, but is in all
719 cases less than the maximum representable value of a size_t. 719 cases less than the maximum representable value of a size_t.
720 */ 720 */
721 void *dlmalloc (size_t); 721 void *dlmalloc(size_t);
722 722
723 /* 723 /*
724 free(void* p) 724 free(void* p)
725 Releases the chunk of memory pointed to by p, that had been previously 725 Releases the chunk of memory pointed to by p, that had been previously
726 allocated using malloc or a related routine such as realloc. 726 allocated using malloc or a related routine such as realloc.
727 It has no effect if p is null. If p was not malloced or already 727 It has no effect if p is null. If p was not malloced or already
728 freed, free(p) will by default cause the current program to abort. 728 freed, free(p) will by default cause the current program to abort.
729 */ 729 */
730 void dlfree (void *); 730 void dlfree(void *);
731 731
732 /* 732 /*
733 calloc(size_t n_elements, size_t element_size); 733 calloc(size_t n_elements, size_t element_size);
734 Returns a pointer to n_elements * element_size bytes, with all locations 734 Returns a pointer to n_elements * element_size bytes, with all locations
735 set to zero. 735 set to zero.
736 */ 736 */
737 void *dlcalloc (size_t, size_t); 737 void *dlcalloc(size_t, size_t);
738 738
739 /* 739 /*
740 realloc(void* p, size_t n) 740 realloc(void* p, size_t n)
741 Returns a pointer to a chunk of size n that contains the same data 741 Returns a pointer to a chunk of size n that contains the same data
742 as does chunk p up to the minimum of (n, p's size) bytes, or null 742 as does chunk p up to the minimum of (n, p's size) bytes, or null
757 757
758 The old unix realloc convention of allowing the last-free'd chunk 758 The old unix realloc convention of allowing the last-free'd chunk
759 to be used as an argument to realloc is not supported. 759 to be used as an argument to realloc is not supported.
760 */ 760 */
761 761
762 void *dlrealloc (void *, size_t); 762 void *dlrealloc(void *, size_t);
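A minimal usage sketch for the four core entry points declared above (dlmalloc, dlcalloc, dlrealloc, dlfree). It assumes the file is built with its default configuration so the dl-prefixed names are available; the function name dl_basic_example is illustrative only.

#include <stddef.h>

extern void *dlmalloc(size_t);
extern void *dlcalloc(size_t, size_t);
extern void *dlrealloc(void *, size_t);
extern void dlfree(void *);

static int dl_basic_example(void)
{
    int *a = (int *) dlmalloc(16 * sizeof *a);  /* uninitialized storage */
    int *z = (int *) dlcalloc(16, sizeof *z);   /* zero-filled storage */
    int *grown;
    if (a == 0 || z == 0) {
        dlfree(a);                              /* freeing null is a no-op */
        dlfree(z);
        return -1;
    }
    grown = (int *) dlrealloc(a, 64 * sizeof *grown); /* may move; data kept */
    if (grown != 0)
        a = grown;
    dlfree(a);
    dlfree(z);
    return 0;
}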
763 763
764 /* 764 /*
765 memalign(size_t alignment, size_t n); 765 memalign(size_t alignment, size_t n);
766 Returns a pointer to a newly allocated chunk of n bytes, aligned 766 Returns a pointer to a newly allocated chunk of n bytes, aligned
767 in accord with the alignment argument. 767 in accord with the alignment argument.
771 8-byte alignment is guaranteed by normal malloc calls, so don't 771 8-byte alignment is guaranteed by normal malloc calls, so don't
772 bother calling memalign with an argument of 8 or less. 772 bother calling memalign with an argument of 8 or less.
773 773
774 Overreliance on memalign is a sure way to fragment space. 774 Overreliance on memalign is a sure way to fragment space.
775 */ 775 */
776 void *dlmemalign (size_t, size_t); 776 void *dlmemalign(size_t, size_t);
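A hedged sketch of dlmemalign requesting a buffer on a 64-byte boundary; per the note above, only alignments greater than 8 are worth requesting. The cast of the pointer to size_t leans on the invariant (checked in init_mparams below) that size_t is as wide as a pointer.

#include <assert.h>
#include <stddef.h>

extern void *dlmemalign(size_t, size_t);
extern void dlfree(void *);

static void memalign_example(void)
{
    void *p = dlmemalign((size_t) 64, (size_t) 1024); /* 1024 bytes, 64-aligned */
    assert(p == 0 || ((size_t) p & (size_t) 63) == 0);
    dlfree(p);
}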
777 777
778 /* 778 /*
779 valloc(size_t n); 779 valloc(size_t n);
780 Equivalent to memalign(pagesize, n), where pagesize is the page 780 Equivalent to memalign(pagesize, n), where pagesize is the page
781 size of the system. If the pagesize is unknown, 4096 is used. 781 size of the system. If the pagesize is unknown, 4096 is used.
782 */ 782 */
783 void *dlvalloc (size_t); 783 void *dlvalloc(size_t);
784 784
785 /* 785 /*
786 mallopt(int parameter_number, int parameter_value) 786 mallopt(int parameter_number, int parameter_value)
787 Sets tunable parameters. The format is to provide a 787 Sets tunable parameters. The format is to provide a
788 (parameter-number, parameter-value) pair. mallopt then sets the 788 (parameter-number, parameter-value) pair. mallopt then sets the
798 Symbol param # default allowed param values 798 Symbol param # default allowed param values
799 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) 799 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
800 M_GRANULARITY -2 page size any power of 2 >= page size 800 M_GRANULARITY -2 page size any power of 2 >= page size
801 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) 801 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
802 */ 802 */
803 int dlmallopt (int, int); 803 int dlmallopt(int, int);
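A sketch of tuning via dlmallopt, using the parameter numbers from the table above. The file defines the M_* symbols elsewhere; they are repeated here only to keep the sketch standalone. dlmallopt returns 1 on success and 0 for an unrecognized parameter or bad value.

extern int dlmallopt(int, int);

#define M_TRIM_THRESHOLD (-1)
#define M_MMAP_THRESHOLD (-3)

static void mallopt_example(void)
{
    dlmallopt(M_TRIM_THRESHOLD, 128 * 1024);  /* trim back to the OS sooner */
    dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024); /* direct-mmap only requests >= 1MB */
}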
804 804
805 /* 805 /*
806 malloc_footprint(); 806 malloc_footprint();
807 Returns the number of bytes obtained from the system. The total 807 Returns the number of bytes obtained from the system. The total
808 number of bytes allocated by malloc, realloc etc., is less than this 808 number of bytes allocated by malloc, realloc etc., is less than this
809 value. Unlike mallinfo, this function returns only a precomputed 809 value. Unlike mallinfo, this function returns only a precomputed
810 result, so can be called frequently to monitor memory consumption. 810 result, so can be called frequently to monitor memory consumption.
811 Even if locks are otherwise defined, this function does not use them, 811 Even if locks are otherwise defined, this function does not use them,
812 so results might not be up to date. 812 so results might not be up to date.
813 */ 813 */
814 size_t dlmalloc_footprint (void); 814 size_t dlmalloc_footprint(void);
815 815
816 /* 816 /*
817 malloc_max_footprint(); 817 malloc_max_footprint();
818 Returns the maximum number of bytes obtained from the system. This 818 Returns the maximum number of bytes obtained from the system. This
819 value will be greater than current footprint if deallocated space 819 value will be greater than current footprint if deallocated space
822 this function returns only a precomputed result, so can be called 822 this function returns only a precomputed result, so can be called
823 frequently to monitor memory consumption. Even if locks are 823 frequently to monitor memory consumption. Even if locks are
824 otherwise defined, this function does not use them, so results might 824 otherwise defined, this function does not use them, so results might
825 not be up to date. 825 not be up to date.
826 */ 826 */
827 size_t dlmalloc_max_footprint (void); 827 size_t dlmalloc_max_footprint(void);
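Because both footprint calls return precomputed values, they are cheap enough to poll. A small monitoring sketch (names illustrative):

#include <stdio.h>
#include <stddef.h>

extern size_t dlmalloc_footprint(void);
extern size_t dlmalloc_max_footprint(void);

static void footprint_example(void)
{
    fprintf(stderr, "heap footprint: %lu bytes now, %lu bytes peak\n",
            (unsigned long) dlmalloc_footprint(),
            (unsigned long) dlmalloc_max_footprint());
}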
828 828
829 #if !NO_MALLINFO 829 #if !NO_MALLINFO
830 /* 830 /*
831 mallinfo() 831 mallinfo()
832 Returns (by copy) a struct containing various summary statistics: 832 Returns (by copy) a struct containing various summary statistics:
847 847
848 Because these fields are ints, but internal bookkeeping may 848 Because these fields are ints, but internal bookkeeping may
849 be kept as longs, the reported values may wrap around zero and 849 be kept as longs, the reported values may wrap around zero and
850 thus be inaccurate. 850 thus be inaccurate.
851 */ 851 */
852 struct mallinfo dlmallinfo (void); 852 struct mallinfo dlmallinfo(void);
853 #endif /* NO_MALLINFO */ 853 #endif /* NO_MALLINFO */
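A sketch reading the summary statistics; it assumes struct mallinfo is in scope from its definition earlier in the file. The fields shown are the ones internal_mallinfo fills in further down, and per the caveat above they are ints and may wrap.

#include <stdio.h>

static void mallinfo_example(void)
{
    struct mallinfo mi = dlmallinfo();
    fprintf(stderr, "arena %d, in use %d, free %d, trimmable %d\n",
            mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
}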
854 854
855 /* 855 /*
856 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); 856 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
857 857
902 pool[i]->next = pool[i+1]; 902 pool[i]->next = pool[i+1];
903 free(pool); // Can now free the array (or not, if it is needed later) 903 free(pool); // Can now free the array (or not, if it is needed later)
904 return first; 904 return first;
905 } 905 }
906 */ 906 */
907 void **dlindependent_calloc (size_t, size_t, void **); 907 void **dlindependent_calloc(size_t, size_t, void **);
908 908
909 /* 909 /*
910 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); 910 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
911 911
912 independent_comalloc allocates, all at once, a set of n_elements 912 independent_comalloc allocates, all at once, a set of n_elements
963 963
964 Overuse of independent_comalloc can increase overall memory usage, 964 Overuse of independent_comalloc can increase overall memory usage,
965 since it cannot reuse existing noncontiguous small chunks that 965 since it cannot reuse existing noncontiguous small chunks that
966 might be available for some of the elements. 966 might be available for some of the elements.
967 */ 967 */
968 void **dlindependent_comalloc (size_t, size_t *, void **); 968 void **dlindependent_comalloc(size_t, size_t *, void **);
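A hedged sketch of the intended use: one header plus two arrays that live and die together, fetched in a single pass. The struct name and fields are illustrative. When the chunks array is supplied, the call returns null on failure; on success each piece can later be freed individually.

#include <stddef.h>

extern void **dlindependent_comalloc(size_t, size_t *, void **);
extern void dlfree(void *);

struct header { int n; };

static int comalloc_example(size_t n)
{
    size_t sizes[3];
    void *chunks[3];
    sizes[0] = sizeof(struct header);
    sizes[1] = n * sizeof(int);
    sizes[2] = n * sizeof(double);
    if (dlindependent_comalloc(3, sizes, chunks) == 0)
        return -1;                      /* nothing was allocated */
    ((struct header *) chunks[0])->n = (int) n;
    /* ... use chunks[1] as int[n] and chunks[2] as double[n] ... */
    dlfree(chunks[0]);
    dlfree(chunks[1]);
    dlfree(chunks[2]);
    return 0;
}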
969 969
970 970
971 /* 971 /*
972 pvalloc(size_t n); 972 pvalloc(size_t n);
973 Equivalent to valloc(minimum-page-that-holds(n)), that is, 973 Equivalent to valloc(minimum-page-that-holds(n)), that is,
974 round up n to nearest pagesize. 974 round up n to nearest pagesize.
975 */ 975 */
976 void *dlpvalloc (size_t); 976 void *dlpvalloc(size_t);
977 977
978 /* 978 /*
979 malloc_trim(size_t pad); 979 malloc_trim(size_t pad);
980 980
981 If possible, gives memory back to the system (via negative arguments 981 If possible, gives memory back to the system (via negative arguments
994 trailing space to service future expected allocations without having 994 trailing space to service future expected allocations without having
995 to re-obtain memory from the system. 995 to re-obtain memory from the system.
996 996
997 Malloc_trim returns 1 if it actually released any memory, else 0. 997 Malloc_trim returns 1 if it actually released any memory, else 0.
998 */ 998 */
999 int dlmalloc_trim (size_t); 999 int dlmalloc_trim(size_t);
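A sketch of trimming after a large burst of frees; the 64K pad is an arbitrary cushion kept for expected upcoming allocations.

extern int dlmalloc_trim(size_t);

static void trim_example(void)
{
    if (dlmalloc_trim((size_t) 64 * 1024)) {
        /* some memory actually went back to the system */
    }
}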
1000 1000
1001 /* 1001 /*
1002 malloc_usable_size(void* p); 1002 malloc_usable_size(void* p);
1003 1003
1004 Returns the number of bytes you can actually use in 1004 Returns the number of bytes you can actually use in
1010 debugging and assertions, for example: 1010 debugging and assertions, for example:
1011 1011
1012 p = malloc(n); 1012 p = malloc(n);
1013 assert(malloc_usable_size(p) >= 256); 1013 assert(malloc_usable_size(p) >= 256);
1014 */ 1014 */
1015 size_t dlmalloc_usable_size (void *); 1015 size_t dlmalloc_usable_size(void *);
1016 1016
1017 /* 1017 /*
1018 malloc_stats(); 1018 malloc_stats();
1019 Prints on stderr the amount of space obtained from the system (both 1019 Prints on stderr the amount of space obtained from the system (both
1020 via sbrk and mmap), the maximum amount (which may be more than 1020 via sbrk and mmap), the maximum amount (which may be more than
1031 (normally sbrk) outside of malloc. 1031 (normally sbrk) outside of malloc.
1032 1032
1033 malloc_stats prints only the most commonly interesting statistics. 1033 malloc_stats prints only the most commonly interesting statistics.
1034 More information can be obtained by calling mallinfo. 1034 More information can be obtained by calling mallinfo.
1035 */ 1035 */
1036 void dlmalloc_stats (void); 1036 void dlmalloc_stats(void);
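Since the report goes straight to stderr, usage is a single call, e.g. at shutdown:

extern void dlmalloc_stats(void);

static void stats_example(void)
{
    dlmalloc_stats();   /* prints the max/system/in-use byte counts to stderr */
}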
1037 1037
1038 #endif /* ONLY_MSPACES */ 1038 #endif /* ONLY_MSPACES */
1039 1039
1040 #if MSPACES 1040 #if MSPACES
1041 1041
1054 dynamically as needed to service mspace_malloc requests. You can 1054 dynamically as needed to service mspace_malloc requests. You can
1055 control the sizes of incremental increases of this space by 1055 control the sizes of incremental increases of this space by
1056 compiling with a different DEFAULT_GRANULARITY or dynamically 1056 compiling with a different DEFAULT_GRANULARITY or dynamically
1057 setting with mallopt(M_GRANULARITY, value). 1057 setting with mallopt(M_GRANULARITY, value).
1058 */ 1058 */
1059 mspace create_mspace (size_t capacity, int locked); 1059 mspace create_mspace(size_t capacity, int locked);
1060 1060
1061 /* 1061 /*
1062 destroy_mspace destroys the given space, and attempts to return all 1062 destroy_mspace destroys the given space, and attempts to return all
1063 of its memory back to the system, returning the total number of 1063 of its memory back to the system, returning the total number of
1064 bytes freed. After destruction, the results of access to all memory 1064 bytes freed. After destruction, the results of access to all memory
1065 used by the space become undefined. 1065 used by the space become undefined.
1066 */ 1066 */
1067 size_t destroy_mspace (mspace msp); 1067 size_t destroy_mspace(mspace msp);
1068 1068
1069 /* 1069 /*
1070 create_mspace_with_base uses the memory supplied as the initial base 1070 create_mspace_with_base uses the memory supplied as the initial base
1071 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this 1071 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1072 space is used for bookkeeping, so the capacity must be at least this 1072 space is used for bookkeeping, so the capacity must be at least this
1073 large. (Otherwise 0 is returned.) When this initial space is 1073 large. (Otherwise 0 is returned.) When this initial space is
1074 exhausted, additional memory will be obtained from the system. 1074 exhausted, additional memory will be obtained from the system.
1075 Destroying this space will deallocate all additionally allocated 1075 Destroying this space will deallocate all additionally allocated
1076 space (if possible) but not the initial base. 1076 space (if possible) but not the initial base.
1077 */ 1077 */
1078 mspace create_mspace_with_base (void *base, size_t capacity, int locked); 1078 mspace create_mspace_with_base(void *base, size_t capacity, int locked);
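A sketch of a self-contained mspace carved out of a static buffer, so no memory comes from the OS until the buffer is exhausted. mspace is the opaque handle type declared earlier in the file; the other entry points used here are declared just below.

static char arena_base[64 * 1024];   /* must exceed the bookkeeping overhead */

static void mspace_example(void)
{
    mspace ms = create_mspace_with_base(arena_base, sizeof(arena_base), 0);
    if (ms != 0) {
        void *p = mspace_malloc(ms, 256);
        mspace_free(ms, p);
        destroy_mspace(ms);  /* returns added segments, never arena_base itself */
    }
}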
1079 1079
1080 /* 1080 /*
1081 mspace_malloc behaves as malloc, but operates within 1081 mspace_malloc behaves as malloc, but operates within
1082 the given space. 1082 the given space.
1083 */ 1083 */
1084 void *mspace_malloc (mspace msp, size_t bytes); 1084 void *mspace_malloc(mspace msp, size_t bytes);
1085 1085
1086 /* 1086 /*
1087 mspace_free behaves as free, but operates within 1087 mspace_free behaves as free, but operates within
1088 the given space. 1088 the given space.
1089 1089
1090 If compiled with FOOTERS==1, mspace_free is not actually needed. 1090 If compiled with FOOTERS==1, mspace_free is not actually needed.
1091 free may be called instead of mspace_free because freed chunks from 1091 free may be called instead of mspace_free because freed chunks from
1092 any space are handled by their originating spaces. 1092 any space are handled by their originating spaces.
1093 */ 1093 */
1094 void mspace_free (mspace msp, void *mem); 1094 void mspace_free(mspace msp, void *mem);
1095 1095
1096 /* 1096 /*
1097 mspace_realloc behaves as realloc, but operates within 1097 mspace_realloc behaves as realloc, but operates within
1098 the given space. 1098 the given space.
1099 1099
1100 If compiled with FOOTERS==1, mspace_realloc is not actually 1100 If compiled with FOOTERS==1, mspace_realloc is not actually
1101 needed. realloc may be called instead of mspace_realloc because 1101 needed. realloc may be called instead of mspace_realloc because
1102 realloced chunks from any space are handled by their originating 1102 realloced chunks from any space are handled by their originating
1103 spaces. 1103 spaces.
1104 */ 1104 */
1105 void *mspace_realloc (mspace msp, void *mem, size_t newsize); 1105 void *mspace_realloc(mspace msp, void *mem, size_t newsize);
1106 1106
1107 /* 1107 /*
1108 mspace_calloc behaves as calloc, but operates within 1108 mspace_calloc behaves as calloc, but operates within
1109 the given space. 1109 the given space.
1110 */ 1110 */
1111 void *mspace_calloc (mspace msp, size_t n_elements, size_t elem_size); 1111 void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1112 1112
1113 /* 1113 /*
1114 mspace_memalign behaves as memalign, but operates within 1114 mspace_memalign behaves as memalign, but operates within
1115 the given space. 1115 the given space.
1116 */ 1116 */
1117 void *mspace_memalign (mspace msp, size_t alignment, size_t bytes); 1117 void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1118 1118
1119 /* 1119 /*
1120 mspace_independent_calloc behaves as independent_calloc, but 1120 mspace_independent_calloc behaves as independent_calloc, but
1121 operates within the given space. 1121 operates within the given space.
1122 */ 1122 */
1123 void **mspace_independent_calloc (mspace msp, size_t n_elements, 1123 void **mspace_independent_calloc(mspace msp, size_t n_elements,
1124 size_t elem_size, void *chunks[]); 1124 size_t elem_size, void *chunks[]);
1125 1125
1126 /* 1126 /*
1127 mspace_independent_comalloc behaves as independent_comalloc, but 1127 mspace_independent_comalloc behaves as independent_comalloc, but
1128 operates within the given space. 1128 operates within the given space.
1129 */ 1129 */
1130 void **mspace_independent_comalloc (mspace msp, size_t n_elements, 1130 void **mspace_independent_comalloc(mspace msp, size_t n_elements,
1131 size_t sizes[], void *chunks[]); 1131 size_t sizes[], void *chunks[]);
1132 1132
1133 /* 1133 /*
1134 mspace_footprint() returns the number of bytes obtained from the 1134 mspace_footprint() returns the number of bytes obtained from the
1135 system for this space. 1135 system for this space.
1136 */ 1136 */
1137 size_t mspace_footprint (mspace msp); 1137 size_t mspace_footprint(mspace msp);
1138 1138
1139 /* 1139 /*
1140 mspace_max_footprint() returns the peak number of bytes obtained from the 1140 mspace_max_footprint() returns the peak number of bytes obtained from the
1141 system for this space. 1141 system for this space.
1142 */ 1142 */
1143 size_t mspace_max_footprint (mspace msp); 1143 size_t mspace_max_footprint(mspace msp);
1144 1144
1145 1145
1146 #if !NO_MALLINFO 1146 #if !NO_MALLINFO
1147 /* 1147 /*
1148 mspace_mallinfo behaves as mallinfo, but reports properties of 1148 mspace_mallinfo behaves as mallinfo, but reports properties of
1149 the given space. 1149 the given space.
1150 */ 1150 */
1151 struct mallinfo mspace_mallinfo (mspace msp); 1151 struct mallinfo mspace_mallinfo(mspace msp);
1152 #endif /* NO_MALLINFO */ 1152 #endif /* NO_MALLINFO */
1153 1153
1154 /* 1154 /*
1155 mspace_malloc_stats behaves as malloc_stats, but reports 1155 mspace_malloc_stats behaves as malloc_stats, but reports
1156 properties of the given space. 1156 properties of the given space.
1157 */ 1157 */
1158 void mspace_malloc_stats (mspace msp); 1158 void mspace_malloc_stats(mspace msp);
1159 1159
1160 /* 1160 /*
1161 mspace_trim behaves as malloc_trim, but 1161 mspace_trim behaves as malloc_trim, but
1162 operates within the given space. 1162 operates within the given space.
1163 */ 1163 */
1164 int mspace_trim (mspace msp, size_t pad); 1164 int mspace_trim(mspace msp, size_t pad);
1165 1165
1166 /* 1166 /*
1167 An alias for mallopt. 1167 An alias for mallopt.
1168 */ 1168 */
1169 int mspace_mallopt (int, int); 1169 int mspace_mallopt(int, int);
1170 1170
1171 #endif /* MSPACES */ 1171 #endif /* MSPACES */
1172 1172
1173 #ifdef __cplusplus 1173 #ifdef __cplusplus
1174 }; /* end of extern "C" */ 1174 }; /* end of extern "C" */
1231 #if HAVE_MORECORE 1231 #if HAVE_MORECORE
1232 #ifndef LACKS_UNISTD_H 1232 #ifndef LACKS_UNISTD_H
1233 #include <unistd.h> /* for sbrk */ 1233 #include <unistd.h> /* for sbrk */
1234 #else /* LACKS_UNISTD_H */ 1234 #else /* LACKS_UNISTD_H */
1235 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) 1235 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1236 extern void *sbrk (ptrdiff_t); 1236 extern void *sbrk(ptrdiff_t);
1237 #endif /* FreeBSD etc */ 1237 #endif /* FreeBSD etc */
1238 #endif /* LACKS_UNISTD_H */ 1238 #endif /* LACKS_UNISTD_H */
1239 #endif /* HAVE_MORECORE */ 1239 #endif /* HAVE_MORECORE */
1240 1240
1241 #ifndef WIN32 1241 #ifndef WIN32
1247 # endif 1247 # endif
1248 # ifdef _SC_PAGE_SIZE 1248 # ifdef _SC_PAGE_SIZE
1249 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) 1249 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1250 # else 1250 # else
1251 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) 1251 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1252 extern size_t getpagesize (); 1252 extern size_t getpagesize();
1253 # define malloc_getpagesize getpagesize() 1253 # define malloc_getpagesize getpagesize()
1254 # else 1254 # else
1255 # ifdef WIN32 /* use supplied emulation of getpagesize */ 1255 # ifdef WIN32 /* use supplied emulation of getpagesize */
1256 # define malloc_getpagesize getpagesize() 1256 # define malloc_getpagesize getpagesize()
1257 # else 1257 # else
1361 #define DIRECT_MMAP(s) CALL_MMAP(s) 1361 #define DIRECT_MMAP(s) CALL_MMAP(s)
1362 #else /* WIN32 */ 1362 #else /* WIN32 */
1363 1363
1364 /* Win32 MMAP via VirtualAlloc */ 1364 /* Win32 MMAP via VirtualAlloc */
1365 static void * 1365 static void *
1366 win32mmap (size_t size) 1366 win32mmap(size_t size)
1367 { 1367 {
1368 void *ptr = 1368 void *ptr =
1369 VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); 1369 VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1370 return (ptr != 0) ? ptr : MFAIL; 1370 return (ptr != 0) ? ptr : MFAIL;
1371 } 1371 }
1372 1372
1373 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ 1373 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1374 static void * 1374 static void *
1375 win32direct_mmap (size_t size) 1375 win32direct_mmap(size_t size)
1376 { 1376 {
1377 void *ptr = 1377 void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1378 VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, 1378 PAGE_READWRITE);
1379 PAGE_READWRITE);
1380 return (ptr != 0) ? ptr : MFAIL; 1379 return (ptr != 0) ? ptr : MFAIL;
1381 } 1380 }
1382 1381
1383 /* This function supports releasing coalesced segments */ 1382 /* This function supports releasing coalesced segments */
1384 static int 1383 static int
1385 win32munmap (void *ptr, size_t size) 1384 win32munmap(void *ptr, size_t size)
1386 { 1385 {
1387 MEMORY_BASIC_INFORMATION minfo; 1386 MEMORY_BASIC_INFORMATION minfo;
1388 char *cptr = ptr; 1387 char *cptr = ptr;
1389 while (size) { 1388 while (size) {
1390 if (VirtualQuery (cptr, &minfo, sizeof (minfo)) == 0) 1389 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1391 return -1; 1390 return -1;
1392 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || 1391 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1393 minfo.State != MEM_COMMIT || minfo.RegionSize > size) 1392 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1394 return -1; 1393 return -1;
1395 if (VirtualFree (cptr, 0, MEM_RELEASE) == 0) 1394 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1396 return -1; 1395 return -1;
1397 cptr += minfo.RegionSize; 1396 cptr += minfo.RegionSize;
1398 size -= minfo.RegionSize; 1397 size -= minfo.RegionSize;
1399 } 1398 }
1400 return 0; 1399 return 0;
1463 are no recursive lock calls, we can use simple spinlocks. 1462 are no recursive lock calls, we can use simple spinlocks.
1464 */ 1463 */
1465 1464
1466 #define MLOCK_T long 1465 #define MLOCK_T long
1467 static int 1466 static int
1468 win32_acquire_lock (MLOCK_T * sl) 1467 win32_acquire_lock(MLOCK_T * sl)
1469 { 1468 {
1470 for (;;) { 1469 for (;;) {
1471 #ifdef InterlockedCompareExchangePointer 1470 #ifdef InterlockedCompareExchangePointer
1472 if (!InterlockedCompareExchange (sl, 1, 0)) 1471 if (!InterlockedCompareExchange(sl, 1, 0))
1473 return 0; 1472 return 0;
1474 #else /* Use older void* version */ 1473 #else /* Use older void* version */
1475 if (!InterlockedCompareExchange 1474 if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
1476 ((void **) sl, (void *) 1, (void *) 0))
1477 return 0; 1475 return 0;
1478 #endif /* InterlockedCompareExchangePointer */ 1476 #endif /* InterlockedCompareExchangePointer */
1479 Sleep (0); 1477 Sleep(0);
1480 } 1478 }
1481 } 1479 }
1482 1480
1483 static void 1481 static void
1484 win32_release_lock (MLOCK_T * sl) 1482 win32_release_lock(MLOCK_T * sl)
1485 { 1483 {
1486 InterlockedExchange (sl, 0); 1484 InterlockedExchange(sl, 0);
1487 } 1485 }
1488 1486
1489 #define INITIAL_LOCK(l) *(l)=0 1487 #define INITIAL_LOCK(l) *(l)=0
1490 #define ACQUIRE_LOCK(l) win32_acquire_lock(l) 1488 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1491 #define RELEASE_LOCK(l) win32_release_lock(l) 1489 #define RELEASE_LOCK(l) win32_release_lock(l)
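A sketch of how these macros are meant to be used; within this file the allocator wraps them around each public entry point (see PREACTION/POSTACTION further down). The lock variable starts at zero, matching INITIAL_LOCK.

static MLOCK_T demo_lock;             /* a long, zero-initialized */

static void locked_section(void)
{
    ACQUIRE_LOCK(&demo_lock);         /* spins, yielding via Sleep(0) */
    /* ... touch shared allocator state ... */
    RELEASE_LOCK(&demo_lock);         /* InterlockedExchange back to 0 */
}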
2124 #define segment_holds(S, A)\ 2122 #define segment_holds(S, A)\
2125 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) 2123 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2126 2124
2127 /* Return segment holding given address */ 2125 /* Return segment holding given address */
2128 static msegmentptr 2126 static msegmentptr
2129 segment_holding (mstate m, char *addr) 2127 segment_holding(mstate m, char *addr)
2130 { 2128 {
2131 msegmentptr sp = &m->seg; 2129 msegmentptr sp = &m->seg;
2132 for (;;) { 2130 for (;;) {
2133 if (addr >= sp->base && addr < sp->base + sp->size) 2131 if (addr >= sp->base && addr < sp->base + sp->size)
2134 return sp; 2132 return sp;
2137 } 2135 }
2138 } 2136 }
2139 2137
2140 /* Return true if segment contains a segment link */ 2138 /* Return true if segment contains a segment link */
2141 static int 2139 static int
2142 has_segment_link (mstate m, msegmentptr ss) 2140 has_segment_link(mstate m, msegmentptr ss)
2143 { 2141 {
2144 msegmentptr sp = &m->seg; 2142 msegmentptr sp = &m->seg;
2145 for (;;) { 2143 for (;;) {
2146 if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size) 2144 if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2147 return 1; 2145 return 1;
2204 2202
2205 /* A count of the number of corruption errors causing resets */ 2203 /* A count of the number of corruption errors causing resets */
2206 int malloc_corruption_error_count; 2204 int malloc_corruption_error_count;
2207 2205
2208 /* default corruption action */ 2206 /* default corruption action */
2209 static void reset_on_error (mstate m); 2207 static void reset_on_error(mstate m);
2210 2208
2211 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) 2209 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2212 #define USAGE_ERROR_ACTION(m, p) 2210 #define USAGE_ERROR_ACTION(m, p)
2213 2211
2214 #else /* PROCEED_ON_ERROR */ 2212 #else /* PROCEED_ON_ERROR */
2240 #define check_top_chunk(M,P) do_check_top_chunk(M,P) 2238 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2241 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) 2239 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2242 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) 2240 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2243 #define check_malloc_state(M) do_check_malloc_state(M) 2241 #define check_malloc_state(M) do_check_malloc_state(M)
2244 2242
2245 static void do_check_any_chunk (mstate m, mchunkptr p); 2243 static void do_check_any_chunk(mstate m, mchunkptr p);
2246 static void do_check_top_chunk (mstate m, mchunkptr p); 2244 static void do_check_top_chunk(mstate m, mchunkptr p);
2247 static void do_check_mmapped_chunk (mstate m, mchunkptr p); 2245 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2248 static void do_check_inuse_chunk (mstate m, mchunkptr p); 2246 static void do_check_inuse_chunk(mstate m, mchunkptr p);
2249 static void do_check_free_chunk (mstate m, mchunkptr p); 2247 static void do_check_free_chunk(mstate m, mchunkptr p);
2250 static void do_check_malloced_chunk (mstate m, void *mem, size_t s); 2248 static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
2251 static void do_check_tree (mstate m, tchunkptr t); 2249 static void do_check_tree(mstate m, tchunkptr t);
2252 static void do_check_treebin (mstate m, bindex_t i); 2250 static void do_check_treebin(mstate m, bindex_t i);
2253 static void do_check_smallbin (mstate m, bindex_t i); 2251 static void do_check_smallbin(mstate m, bindex_t i);
2254 static void do_check_malloc_state (mstate m); 2252 static void do_check_malloc_state(mstate m);
2255 static int bin_find (mstate m, mchunkptr x); 2253 static int bin_find(mstate m, mchunkptr x);
2256 static size_t traverse_and_check (mstate m); 2254 static size_t traverse_and_check(mstate m);
2257 #endif /* DEBUG */ 2255 #endif /* DEBUG */
2258 2256
2259 /* ---------------------------- Indexing Bins ---------------------------- */ 2257 /* ---------------------------- Indexing Bins ---------------------------- */
2260 2258
2261 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) 2259 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2482 2480
2483 /* ---------------------------- setting mparams -------------------------- */ 2481 /* ---------------------------- setting mparams -------------------------- */
2484 2482
2485 /* Initialize mparams */ 2483 /* Initialize mparams */
2486 static int 2484 static int
2487 init_mparams (void) 2485 init_mparams(void)
2488 { 2486 {
2489 if (mparams.page_size == 0) { 2487 if (mparams.page_size == 0) {
2490 size_t s; 2488 size_t s;
2491 2489
2492 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; 2490 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2500 2498
2501 #if (FOOTERS && !INSECURE) 2499 #if (FOOTERS && !INSECURE)
2502 { 2500 {
2503 #if USE_DEV_RANDOM 2501 #if USE_DEV_RANDOM
2504 int fd; 2502 int fd;
2505 unsigned char buf[sizeof (size_t)]; 2503 unsigned char buf[sizeof(size_t)];
2506 /* Try to use /dev/urandom, else fall back on using time */ 2504 /* Try to use /dev/urandom, else fall back on using time */
2507 if ((fd = open ("/dev/urandom", O_RDONLY)) >= 0 && 2505 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2508 read (fd, buf, sizeof (buf)) == sizeof (buf)) { 2506 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2509 s = *((size_t *) buf); 2507 s = *((size_t *) buf);
2510 close (fd); 2508 close(fd);
2511 } else 2509 } else
2512 #endif /* USE_DEV_RANDOM */ 2510 #endif /* USE_DEV_RANDOM */
2513 s = (size_t) (time (0) ^ (size_t) 0x55555555U); 2511 s = (size_t) (time(0) ^ (size_t) 0x55555555U);
2514 2512
2515 s |= (size_t) 8U; /* ensure nonzero */ 2513 s |= (size_t) 8U; /* ensure nonzero */
2516 s &= ~(size_t) 7U; /* improve chances of fault for bad values */ 2514 s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2517 2515
2518 } 2516 }
2519 #else /* (FOOTERS && !INSECURE) */ 2517 #else /* (FOOTERS && !INSECURE) */
2520 s = (size_t) 0x58585858U; 2518 s = (size_t) 0x58585858U;
2521 #endif /* (FOOTERS && !INSECURE) */ 2519 #endif /* (FOOTERS && !INSECURE) */
2522 ACQUIRE_MAGIC_INIT_LOCK (); 2520 ACQUIRE_MAGIC_INIT_LOCK();
2523 if (mparams.magic == 0) { 2521 if (mparams.magic == 0) {
2524 mparams.magic = s; 2522 mparams.magic = s;
2525 /* Set up lock for main malloc area */ 2523 /* Set up lock for main malloc area */
2526 INITIAL_LOCK (&gm->mutex); 2524 INITIAL_LOCK(&gm->mutex);
2527 gm->mflags = mparams.default_mflags; 2525 gm->mflags = mparams.default_mflags;
2528 } 2526 }
2529 RELEASE_MAGIC_INIT_LOCK (); 2527 RELEASE_MAGIC_INIT_LOCK();
2530 2528
2531 #ifndef WIN32 2529 #ifndef WIN32
2532 mparams.page_size = malloc_getpagesize; 2530 mparams.page_size = malloc_getpagesize;
2533 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ? 2531 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2534 DEFAULT_GRANULARITY : mparams.page_size); 2532 DEFAULT_GRANULARITY : mparams.page_size);
2535 #else /* WIN32 */ 2533 #else /* WIN32 */
2536 { 2534 {
2537 SYSTEM_INFO system_info; 2535 SYSTEM_INFO system_info;
2538 GetSystemInfo (&system_info); 2536 GetSystemInfo(&system_info);
2539 mparams.page_size = system_info.dwPageSize; 2537 mparams.page_size = system_info.dwPageSize;
2540 mparams.granularity = system_info.dwAllocationGranularity; 2538 mparams.granularity = system_info.dwAllocationGranularity;
2541 } 2539 }
2542 #endif /* WIN32 */ 2540 #endif /* WIN32 */
2543 2541
2545 size_t must be unsigned and as wide as pointer type. 2543 size_t must be unsigned and as wide as pointer type.
2546 ints must be at least 4 bytes. 2544 ints must be at least 4 bytes.
2547 alignment must be at least 8. 2545 alignment must be at least 8.
2548 Alignment, min chunk size, and page size must all be powers of 2. 2546 Alignment, min chunk size, and page size must all be powers of 2.
2549 */ 2547 */
2550 if ((sizeof (size_t) != sizeof (char *)) || 2548 if ((sizeof(size_t) != sizeof(char *)) ||
2551 (MAX_SIZE_T < MIN_CHUNK_SIZE) || 2549 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2552 (sizeof (int) < 4) || 2550 (sizeof(int) < 4) ||
2553 (MALLOC_ALIGNMENT < (size_t) 8U) || 2551 (MALLOC_ALIGNMENT < (size_t) 8U) ||
2554 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) || 2552 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2555 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) || 2553 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2556 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0) 2554 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2557 || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0)) 2555 || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2560 return 0; 2558 return 0;
2561 } 2559 }
2562 2560
2563 /* support for mallopt */ 2561 /* support for mallopt */
2564 static int 2562 static int
2565 change_mparam (int param_number, int value) 2563 change_mparam(int param_number, int value)
2566 { 2564 {
2567 size_t val = (size_t) value; 2565 size_t val = (size_t) value;
2568 init_mparams (); 2566 init_mparams();
2569 switch (param_number) { 2567 switch (param_number) {
2570 case M_TRIM_THRESHOLD: 2568 case M_TRIM_THRESHOLD:
2571 mparams.trim_threshold = val; 2569 mparams.trim_threshold = val;
2572 return 1; 2570 return 1;
2573 case M_GRANULARITY: 2571 case M_GRANULARITY:
2587 #if DEBUG 2585 #if DEBUG
2588 /* ------------------------- Debugging Support --------------------------- */ 2586 /* ------------------------- Debugging Support --------------------------- */
2589 2587
2590 /* Check properties of any chunk, whether free, inuse, mmapped etc */ 2588 /* Check properties of any chunk, whether free, inuse, mmapped etc */
2591 static void 2589 static void
2592 do_check_any_chunk (mstate m, mchunkptr p) 2590 do_check_any_chunk(mstate m, mchunkptr p)
2593 { 2591 {
2594 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD)); 2592 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2595 assert (ok_address (m, p)); 2593 assert(ok_address(m, p));
2596 } 2594 }
2597 2595
2598 /* Check properties of top chunk */ 2596 /* Check properties of top chunk */
2599 static void 2597 static void
2600 do_check_top_chunk (mstate m, mchunkptr p) 2598 do_check_top_chunk(mstate m, mchunkptr p)
2601 { 2599 {
2602 msegmentptr sp = segment_holding (m, (char *) p); 2600 msegmentptr sp = segment_holding(m, (char *) p);
2603 size_t sz = chunksize (p); 2601 size_t sz = chunksize(p);
2604 assert (sp != 0); 2602 assert(sp != 0);
2605 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD)); 2603 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2606 assert (ok_address (m, p)); 2604 assert(ok_address(m, p));
2607 assert (sz == m->topsize); 2605 assert(sz == m->topsize);
2608 assert (sz > 0); 2606 assert(sz > 0);
2609 assert (sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE); 2607 assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2610 assert (pinuse (p)); 2608 assert(pinuse(p));
2611 assert (!next_pinuse (p)); 2609 assert(!next_pinuse(p));
2612 } 2610 }
2613 2611
2614 /* Check properties of (inuse) mmapped chunks */ 2612 /* Check properties of (inuse) mmapped chunks */
2615 static void 2613 static void
2616 do_check_mmapped_chunk (mstate m, mchunkptr p) 2614 do_check_mmapped_chunk(mstate m, mchunkptr p)
2617 { 2615 {
2618 size_t sz = chunksize (p); 2616 size_t sz = chunksize(p);
2619 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); 2617 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2620 assert (is_mmapped (p)); 2618 assert(is_mmapped(p));
2621 assert (use_mmap (m)); 2619 assert(use_mmap(m));
2622 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD)); 2620 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2623 assert (ok_address (m, p)); 2621 assert(ok_address(m, p));
2624 assert (!is_small (sz)); 2622 assert(!is_small(sz));
2625 assert ((len & (mparams.page_size - SIZE_T_ONE)) == 0); 2623 assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2626 assert (chunk_plus_offset (p, sz)->head == FENCEPOST_HEAD); 2624 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2627 assert (chunk_plus_offset (p, sz + SIZE_T_SIZE)->head == 0); 2625 assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
2628 } 2626 }
2629 2627
2630 /* Check properties of inuse chunks */ 2628 /* Check properties of inuse chunks */
2631 static void 2629 static void
2632 do_check_inuse_chunk (mstate m, mchunkptr p) 2630 do_check_inuse_chunk(mstate m, mchunkptr p)
2633 { 2631 {
2634 do_check_any_chunk (m, p); 2632 do_check_any_chunk(m, p);
2635 assert (cinuse (p)); 2633 assert(cinuse(p));
2636 assert (next_pinuse (p)); 2634 assert(next_pinuse(p));
2637 /* If not pinuse and not mmapped, previous chunk has OK offset */ 2635 /* If not pinuse and not mmapped, previous chunk has OK offset */
2638 assert (is_mmapped (p) || pinuse (p) || next_chunk (prev_chunk (p)) == p); 2636 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2639 if (is_mmapped (p)) 2637 if (is_mmapped(p))
2640 do_check_mmapped_chunk (m, p); 2638 do_check_mmapped_chunk(m, p);
2641 } 2639 }
2642 2640
2643 /* Check properties of free chunks */ 2641 /* Check properties of free chunks */
2644 static void 2642 static void
2645 do_check_free_chunk (mstate m, mchunkptr p) 2643 do_check_free_chunk(mstate m, mchunkptr p)
2646 { 2644 {
2647 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT); 2645 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2648 mchunkptr next = chunk_plus_offset (p, sz); 2646 mchunkptr next = chunk_plus_offset(p, sz);
2649 do_check_any_chunk (m, p); 2647 do_check_any_chunk(m, p);
2650 assert (!cinuse (p)); 2648 assert(!cinuse(p));
2651 assert (!next_pinuse (p)); 2649 assert(!next_pinuse(p));
2652 assert (!is_mmapped (p)); 2650 assert(!is_mmapped(p));
2653 if (p != m->dv && p != m->top) { 2651 if (p != m->dv && p != m->top) {
2654 if (sz >= MIN_CHUNK_SIZE) { 2652 if (sz >= MIN_CHUNK_SIZE) {
2655 assert ((sz & CHUNK_ALIGN_MASK) == 0); 2653 assert((sz & CHUNK_ALIGN_MASK) == 0);
2656 assert (is_aligned (chunk2mem (p))); 2654 assert(is_aligned(chunk2mem(p)));
2657 assert (next->prev_foot == sz); 2655 assert(next->prev_foot == sz);
2658 assert (pinuse (p)); 2656 assert(pinuse(p));
2659 assert (next == m->top || cinuse (next)); 2657 assert(next == m->top || cinuse(next));
2660 assert (p->fd->bk == p); 2658 assert(p->fd->bk == p);
2661 assert (p->bk->fd == p); 2659 assert(p->bk->fd == p);
2662 } else /* markers are always of size SIZE_T_SIZE */ 2660 } else /* markers are always of size SIZE_T_SIZE */
2663 assert (sz == SIZE_T_SIZE); 2661 assert(sz == SIZE_T_SIZE);
2664 } 2662 }
2665 } 2663 }
2666 2664
2667 /* Check properties of malloced chunks at the point they are malloced */ 2665 /* Check properties of malloced chunks at the point they are malloced */
2668 static void 2666 static void
2669 do_check_malloced_chunk (mstate m, void *mem, size_t s) 2667 do_check_malloced_chunk(mstate m, void *mem, size_t s)
2670 { 2668 {
2671 if (mem != 0) { 2669 if (mem != 0) {
2672 mchunkptr p = mem2chunk (mem); 2670 mchunkptr p = mem2chunk(mem);
2673 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT); 2671 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2674 do_check_inuse_chunk (m, p); 2672 do_check_inuse_chunk(m, p);
2675 assert ((sz & CHUNK_ALIGN_MASK) == 0); 2673 assert((sz & CHUNK_ALIGN_MASK) == 0);
2676 assert (sz >= MIN_CHUNK_SIZE); 2674 assert(sz >= MIN_CHUNK_SIZE);
2677 assert (sz >= s); 2675 assert(sz >= s);
2678 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ 2676 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2679 assert (is_mmapped (p) || sz < (s + MIN_CHUNK_SIZE)); 2677 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2680 } 2678 }
2681 } 2679 }
2682 2680
2683 /* Check a tree and its subtrees. */ 2681 /* Check a tree and its subtrees. */
2684 static void 2682 static void
2685 do_check_tree (mstate m, tchunkptr t) 2683 do_check_tree(mstate m, tchunkptr t)
2686 { 2684 {
2687 tchunkptr head = 0; 2685 tchunkptr head = 0;
2688 tchunkptr u = t; 2686 tchunkptr u = t;
2689 bindex_t tindex = t->index; 2687 bindex_t tindex = t->index;
2690 size_t tsize = chunksize (t); 2688 size_t tsize = chunksize(t);
2691 bindex_t idx; 2689 bindex_t idx;
2692 compute_tree_index (tsize, idx); 2690 compute_tree_index(tsize, idx);
2693 assert (tindex == idx); 2691 assert(tindex == idx);
2694 assert (tsize >= MIN_LARGE_SIZE); 2692 assert(tsize >= MIN_LARGE_SIZE);
2695 assert (tsize >= minsize_for_tree_index (idx)); 2693 assert(tsize >= minsize_for_tree_index(idx));
2696 assert ((idx == NTREEBINS - 1) 2694 assert((idx == NTREEBINS - 1)
2697 || (tsize < minsize_for_tree_index ((idx + 1)))); 2695 || (tsize < minsize_for_tree_index((idx + 1))));
2698 2696
2699 do { /* traverse through chain of same-sized nodes */ 2697 do { /* traverse through chain of same-sized nodes */
2700 do_check_any_chunk (m, ((mchunkptr) u)); 2698 do_check_any_chunk(m, ((mchunkptr) u));
2701 assert (u->index == tindex); 2699 assert(u->index == tindex);
2702 assert (chunksize (u) == tsize); 2700 assert(chunksize(u) == tsize);
2703 assert (!cinuse (u)); 2701 assert(!cinuse(u));
2704 assert (!next_pinuse (u)); 2702 assert(!next_pinuse(u));
2705 assert (u->fd->bk == u); 2703 assert(u->fd->bk == u);
2706 assert (u->bk->fd == u); 2704 assert(u->bk->fd == u);
2707 if (u->parent == 0) { 2705 if (u->parent == 0) {
2708 assert (u->child[0] == 0); 2706 assert(u->child[0] == 0);
2709 assert (u->child[1] == 0); 2707 assert(u->child[1] == 0);
2710 } else { 2708 } else {
2711 assert (head == 0); /* only one node on chain has parent */ 2709 assert(head == 0); /* only one node on chain has parent */
2712 head = u; 2710 head = u;
2713 assert (u->parent != u); 2711 assert(u->parent != u);
2714 assert (u->parent->child[0] == u || 2712 assert(u->parent->child[0] == u ||
2715 u->parent->child[1] == u || 2713 u->parent->child[1] == u ||
2716 *((tbinptr *) (u->parent)) == u); 2714 *((tbinptr *) (u->parent)) == u);
2717 if (u->child[0] != 0) { 2715 if (u->child[0] != 0) {
2718 assert (u->child[0]->parent == u); 2716 assert(u->child[0]->parent == u);
2719 assert (u->child[0] != u); 2717 assert(u->child[0] != u);
2720 do_check_tree (m, u->child[0]); 2718 do_check_tree(m, u->child[0]);
2721 } 2719 }
2722 if (u->child[1] != 0) { 2720 if (u->child[1] != 0) {
2723 assert (u->child[1]->parent == u); 2721 assert(u->child[1]->parent == u);
2724 assert (u->child[1] != u); 2722 assert(u->child[1] != u);
2725 do_check_tree (m, u->child[1]); 2723 do_check_tree(m, u->child[1]);
2726 } 2724 }
2727 if (u->child[0] != 0 && u->child[1] != 0) { 2725 if (u->child[0] != 0 && u->child[1] != 0) {
2728 assert (chunksize (u->child[0]) < chunksize (u->child[1])); 2726 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
2729 } 2727 }
2730 } 2728 }
2731 u = u->fd; 2729 u = u->fd;
2732 } 2730 }
2733 while (u != t); 2731 while (u != t);
2734 assert (head != 0); 2732 assert(head != 0);
2735 } 2733 }
2736 2734
2737 /* Check all the chunks in a treebin. */ 2735 /* Check all the chunks in a treebin. */
2738 static void 2736 static void
2739 do_check_treebin (mstate m, bindex_t i) 2737 do_check_treebin(mstate m, bindex_t i)
2740 { 2738 {
2741 tbinptr *tb = treebin_at (m, i); 2739 tbinptr *tb = treebin_at(m, i);
2742 tchunkptr t = *tb; 2740 tchunkptr t = *tb;
2743 int empty = (m->treemap & (1U << i)) == 0; 2741 int empty = (m->treemap & (1U << i)) == 0;
2744 if (t == 0) 2742 if (t == 0)
2745 assert (empty); 2743 assert(empty);
2746 if (!empty) 2744 if (!empty)
2747 do_check_tree (m, t); 2745 do_check_tree(m, t);
2748 } 2746 }
2749 2747
2750 /* Check all the chunks in a smallbin. */ 2748 /* Check all the chunks in a smallbin. */
2751 static void 2749 static void
2752 do_check_smallbin (mstate m, bindex_t i) 2750 do_check_smallbin(mstate m, bindex_t i)
2753 { 2751 {
2754 sbinptr b = smallbin_at (m, i); 2752 sbinptr b = smallbin_at(m, i);
2755 mchunkptr p = b->bk; 2753 mchunkptr p = b->bk;
2756 unsigned int empty = (m->smallmap & (1U << i)) == 0; 2754 unsigned int empty = (m->smallmap & (1U << i)) == 0;
2757 if (p == b) 2755 if (p == b)
2758 assert (empty); 2756 assert(empty);
2759 if (!empty) { 2757 if (!empty) {
2760 for (; p != b; p = p->bk) { 2758 for (; p != b; p = p->bk) {
2761 size_t size = chunksize (p); 2759 size_t size = chunksize(p);
2762 mchunkptr q; 2760 mchunkptr q;
2763 /* each chunk claims to be free */ 2761 /* each chunk claims to be free */
2764 do_check_free_chunk (m, p); 2762 do_check_free_chunk(m, p);
2765 /* chunk belongs in bin */ 2763 /* chunk belongs in bin */
2766 assert (small_index (size) == i); 2764 assert(small_index(size) == i);
2767 assert (p->bk == b || chunksize (p->bk) == chunksize (p)); 2765 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
2768 /* chunk is followed by an inuse chunk */ 2766 /* chunk is followed by an inuse chunk */
2769 q = next_chunk (p); 2767 q = next_chunk(p);
2770 if (q->head != FENCEPOST_HEAD) 2768 if (q->head != FENCEPOST_HEAD)
2771 do_check_inuse_chunk (m, q); 2769 do_check_inuse_chunk(m, q);
2772 } 2770 }
2773 } 2771 }
2774 } 2772 }
2775 2773
2776 /* Find x in a bin. Used in other check functions. */ 2774 /* Find x in a bin. Used in other check functions. */
2777 static int 2775 static int
2778 bin_find (mstate m, mchunkptr x) 2776 bin_find(mstate m, mchunkptr x)
2779 { 2777 {
2780 size_t size = chunksize (x); 2778 size_t size = chunksize(x);
2781 if (is_small (size)) { 2779 if (is_small(size)) {
2782 bindex_t sidx = small_index (size); 2780 bindex_t sidx = small_index(size);
2783 sbinptr b = smallbin_at (m, sidx); 2781 sbinptr b = smallbin_at(m, sidx);
2784 if (smallmap_is_marked (m, sidx)) { 2782 if (smallmap_is_marked(m, sidx)) {
2785 mchunkptr p = b; 2783 mchunkptr p = b;
2786 do { 2784 do {
2787 if (p == x) 2785 if (p == x)
2788 return 1; 2786 return 1;
2789 } 2787 }
2790 while ((p = p->fd) != b); 2788 while ((p = p->fd) != b);
2791 } 2789 }
2792 } else { 2790 } else {
2793 bindex_t tidx; 2791 bindex_t tidx;
2794 compute_tree_index (size, tidx); 2792 compute_tree_index(size, tidx);
2795 if (treemap_is_marked (m, tidx)) { 2793 if (treemap_is_marked(m, tidx)) {
2796 tchunkptr t = *treebin_at (m, tidx); 2794 tchunkptr t = *treebin_at(m, tidx);
2797 size_t sizebits = size << leftshift_for_tree_index (tidx); 2795 size_t sizebits = size << leftshift_for_tree_index(tidx);
2798 while (t != 0 && chunksize (t) != size) { 2796 while (t != 0 && chunksize(t) != size) {
2799 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]; 2797 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2800 sizebits <<= 1; 2798 sizebits <<= 1;
2801 } 2799 }
2802 if (t != 0) { 2800 if (t != 0) {
2803 tchunkptr u = t; 2801 tchunkptr u = t;
2812 return 0; 2810 return 0;
2813 } 2811 }
2814 2812
2815 /* Traverse each chunk and check it; return total */ 2813 /* Traverse each chunk and check it; return total */
2816 static size_t 2814 static size_t
2817 traverse_and_check (mstate m) 2815 traverse_and_check(mstate m)
2818 { 2816 {
2819 size_t sum = 0; 2817 size_t sum = 0;
2820 if (is_initialized (m)) { 2818 if (is_initialized(m)) {
2821 msegmentptr s = &m->seg; 2819 msegmentptr s = &m->seg;
2822 sum += m->topsize + TOP_FOOT_SIZE; 2820 sum += m->topsize + TOP_FOOT_SIZE;
2823 while (s != 0) { 2821 while (s != 0) {
2824 mchunkptr q = align_as_chunk (s->base); 2822 mchunkptr q = align_as_chunk(s->base);
2825 mchunkptr lastq = 0; 2823 mchunkptr lastq = 0;
2826 assert (pinuse (q)); 2824 assert(pinuse(q));
2827 while (segment_holds (s, q) && 2825 while (segment_holds(s, q) &&
2828 q != m->top && q->head != FENCEPOST_HEAD) { 2826 q != m->top && q->head != FENCEPOST_HEAD) {
2829 sum += chunksize (q); 2827 sum += chunksize(q);
2830 if (cinuse (q)) { 2828 if (cinuse(q)) {
2831 assert (!bin_find (m, q)); 2829 assert(!bin_find(m, q));
2832 do_check_inuse_chunk (m, q); 2830 do_check_inuse_chunk(m, q);
2833 } else { 2831 } else {
2834 assert (q == m->dv || bin_find (m, q)); 2832 assert(q == m->dv || bin_find(m, q));
2835 assert (lastq == 0 || cinuse (lastq)); /* Not 2 consecutive free */ 2833 assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
2836 do_check_free_chunk (m, q); 2834 do_check_free_chunk(m, q);
2837 } 2835 }
2838 lastq = q; 2836 lastq = q;
2839 q = next_chunk (q); 2837 q = next_chunk(q);
2840 } 2838 }
2841 s = s->next; 2839 s = s->next;
2842 } 2840 }
2843 } 2841 }
2844 return sum; 2842 return sum;
2845 } 2843 }
2846 2844
2847 /* Check all properties of malloc_state. */ 2845 /* Check all properties of malloc_state. */
2848 static void 2846 static void
2849 do_check_malloc_state (mstate m) 2847 do_check_malloc_state(mstate m)
2850 { 2848 {
2851 bindex_t i; 2849 bindex_t i;
2852 size_t total; 2850 size_t total;
2853 /* check bins */ 2851 /* check bins */
2854 for (i = 0; i < NSMALLBINS; ++i) 2852 for (i = 0; i < NSMALLBINS; ++i)
2855 do_check_smallbin (m, i); 2853 do_check_smallbin(m, i);
2856 for (i = 0; i < NTREEBINS; ++i) 2854 for (i = 0; i < NTREEBINS; ++i)
2857 do_check_treebin (m, i); 2855 do_check_treebin(m, i);
2858 2856
2859 if (m->dvsize != 0) { /* check dv chunk */ 2857 if (m->dvsize != 0) { /* check dv chunk */
2860 do_check_any_chunk (m, m->dv); 2858 do_check_any_chunk(m, m->dv);
2861 assert (m->dvsize == chunksize (m->dv)); 2859 assert(m->dvsize == chunksize(m->dv));
2862 assert (m->dvsize >= MIN_CHUNK_SIZE); 2860 assert(m->dvsize >= MIN_CHUNK_SIZE);
2863 assert (bin_find (m, m->dv) == 0); 2861 assert(bin_find(m, m->dv) == 0);
2864 } 2862 }
2865 2863
2866 if (m->top != 0) { /* check top chunk */ 2864 if (m->top != 0) { /* check top chunk */
2867 do_check_top_chunk (m, m->top); 2865 do_check_top_chunk(m, m->top);
2868 assert (m->topsize == chunksize (m->top)); 2866 assert(m->topsize == chunksize(m->top));
2869 assert (m->topsize > 0); 2867 assert(m->topsize > 0);
2870 assert (bin_find (m, m->top) == 0); 2868 assert(bin_find(m, m->top) == 0);
2871 } 2869 }
2872 2870
2873 total = traverse_and_check (m); 2871 total = traverse_and_check(m);
2874 assert (total <= m->footprint); 2872 assert(total <= m->footprint);
2875 assert (m->footprint <= m->max_footprint); 2873 assert(m->footprint <= m->max_footprint);
2876 } 2874 }
2877 #endif /* DEBUG */ 2875 #endif /* DEBUG */
2878 2876
2879 /* ----------------------------- statistics ------------------------------ */ 2877 /* ----------------------------- statistics ------------------------------ */
2880 2878
2881 #if !NO_MALLINFO 2879 #if !NO_MALLINFO
2882 static struct mallinfo 2880 static struct mallinfo
2883 internal_mallinfo (mstate m) 2881 internal_mallinfo(mstate m)
2884 { 2882 {
2885 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 2883 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2886 if (!PREACTION (m)) { 2884 if (!PREACTION(m)) {
2887 check_malloc_state (m); 2885 check_malloc_state(m);
2888 if (is_initialized (m)) { 2886 if (is_initialized(m)) {
2889 size_t nfree = SIZE_T_ONE; /* top always free */ 2887 size_t nfree = SIZE_T_ONE; /* top always free */
2890 size_t mfree = m->topsize + TOP_FOOT_SIZE; 2888 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2891 size_t sum = mfree; 2889 size_t sum = mfree;
2892 msegmentptr s = &m->seg; 2890 msegmentptr s = &m->seg;
2893 while (s != 0) { 2891 while (s != 0) {
2894 mchunkptr q = align_as_chunk (s->base); 2892 mchunkptr q = align_as_chunk(s->base);
2895 while (segment_holds (s, q) && 2893 while (segment_holds(s, q) &&
2896 q != m->top && q->head != FENCEPOST_HEAD) { 2894 q != m->top && q->head != FENCEPOST_HEAD) {
2897 size_t sz = chunksize (q); 2895 size_t sz = chunksize(q);
2898 sum += sz; 2896 sum += sz;
2899 if (!cinuse (q)) { 2897 if (!cinuse(q)) {
2900 mfree += sz; 2898 mfree += sz;
2901 ++nfree; 2899 ++nfree;
2902 } 2900 }
2903 q = next_chunk (q); 2901 q = next_chunk(q);
2904 } 2902 }
2905 s = s->next; 2903 s = s->next;
2906 } 2904 }
2907 2905
2908 nm.arena = sum; 2906 nm.arena = sum;
2912 nm.uordblks = m->footprint - mfree; 2910 nm.uordblks = m->footprint - mfree;
2913 nm.fordblks = mfree; 2911 nm.fordblks = mfree;
2914 nm.keepcost = m->topsize; 2912 nm.keepcost = m->topsize;
2915 } 2913 }
2916 2914
2917 POSTACTION (m); 2915 POSTACTION(m);
2918 } 2916 }
2919 return nm; 2917 return nm;
2920 } 2918 }
2921 #endif /* !NO_MALLINFO */ 2919 #endif /* !NO_MALLINFO */
2922 2920
2923 static void 2921 static void
2924 internal_malloc_stats (mstate m) 2922 internal_malloc_stats(mstate m)
2925 { 2923 {
2926 if (!PREACTION (m)) { 2924 if (!PREACTION(m)) {
2927 size_t maxfp = 0; 2925 size_t maxfp = 0;
2928 size_t fp = 0; 2926 size_t fp = 0;
2929 size_t used = 0; 2927 size_t used = 0;
2930 check_malloc_state (m); 2928 check_malloc_state(m);
2931 if (is_initialized (m)) { 2929 if (is_initialized(m)) {
2932 msegmentptr s = &m->seg; 2930 msegmentptr s = &m->seg;
2933 maxfp = m->max_footprint; 2931 maxfp = m->max_footprint;
2934 fp = m->footprint; 2932 fp = m->footprint;
2935 used = fp - (m->topsize + TOP_FOOT_SIZE); 2933 used = fp - (m->topsize + TOP_FOOT_SIZE);
2936 2934
2937 while (s != 0) { 2935 while (s != 0) {
2938 mchunkptr q = align_as_chunk (s->base); 2936 mchunkptr q = align_as_chunk(s->base);
2939 while (segment_holds (s, q) && 2937 while (segment_holds(s, q) &&
2940 q != m->top && q->head != FENCEPOST_HEAD) { 2938 q != m->top && q->head != FENCEPOST_HEAD) {
2941 if (!cinuse (q)) 2939 if (!cinuse(q))
2942 used -= chunksize (q); 2940 used -= chunksize(q);
2943 q = next_chunk (q); 2941 q = next_chunk(q);
2944 } 2942 }
2945 s = s->next; 2943 s = s->next;
2946 } 2944 }
2947 } 2945 }
2948 #ifndef LACKS_STDIO_H 2946 #ifndef LACKS_STDIO_H
2949 fprintf (stderr, "max system bytes = %10lu\n", 2947 fprintf(stderr, "max system bytes = %10lu\n",
2950 (unsigned long) (maxfp)); 2948 (unsigned long) (maxfp));
2951 fprintf (stderr, "system bytes = %10lu\n", (unsigned long) (fp)); 2949 fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
2952 fprintf (stderr, "in use bytes = %10lu\n", 2950 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
2953 (unsigned long) (used));
2954 #endif 2951 #endif
2955 2952
2956 POSTACTION (m); 2953 POSTACTION(m);
2957 } 2954 }
2958 } 2955 }
2959 2956
2960 /* ----------------------- Operations on smallbins ----------------------- */ 2957 /* ----------------------- Operations on smallbins ----------------------- */
2961 2958
3217 the PINUSE bit so frees can be checked. 3214 the PINUSE bit so frees can be checked.
3218 */ 3215 */
3219 3216
3220 /* Malloc using mmap */ 3217 /* Malloc using mmap */
3221 static void * 3218 static void *
3222 mmap_alloc (mstate m, size_t nb) 3219 mmap_alloc(mstate m, size_t nb)
3223 { 3220 {
3224 size_t mmsize = 3221 size_t mmsize =
3225 granularity_align (nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); 3222 granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3226 if (mmsize > nb) { /* Check for wrap around 0 */ 3223 if (mmsize > nb) { /* Check for wrap around 0 */
3227 char *mm = (char *) (DIRECT_MMAP (mmsize)); 3224 char *mm = (char *) (DIRECT_MMAP(mmsize));
3228 if (mm != CMFAIL) { 3225 if (mm != CMFAIL) {
3229 size_t offset = align_offset (chunk2mem (mm)); 3226 size_t offset = align_offset(chunk2mem(mm));
3230 size_t psize = mmsize - offset - MMAP_FOOT_PAD; 3227 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3231 mchunkptr p = (mchunkptr) (mm + offset); 3228 mchunkptr p = (mchunkptr) (mm + offset);
3232 p->prev_foot = offset | IS_MMAPPED_BIT; 3229 p->prev_foot = offset | IS_MMAPPED_BIT;
3233 (p)->head = (psize | CINUSE_BIT); 3230 (p)->head = (psize | CINUSE_BIT);
3234 mark_inuse_foot (m, p, psize); 3231 mark_inuse_foot(m, p, psize);
3235 chunk_plus_offset (p, psize)->head = FENCEPOST_HEAD; 3232 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3236 chunk_plus_offset (p, psize + SIZE_T_SIZE)->head = 0; 3233 chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
3237 3234
3238 if (mm < m->least_addr) 3235 if (mm < m->least_addr)
3239 m->least_addr = mm; 3236 m->least_addr = mm;
3240 if ((m->footprint += mmsize) > m->max_footprint) 3237 if ((m->footprint += mmsize) > m->max_footprint)
3241 m->max_footprint = m->footprint; 3238 m->max_footprint = m->footprint;
3242 assert (is_aligned (chunk2mem (p))); 3239 assert(is_aligned(chunk2mem(p)));
3243 check_mmapped_chunk (m, p); 3240 check_mmapped_chunk(m, p);
3244 return chunk2mem (p); 3241 return chunk2mem(p);
3245 } 3242 }
3246 } 3243 }
3247 return 0; 3244 return 0;
3248 } 3245 }
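/* A standalone sketch of the size arithmetic in mmap_alloc above;
   illustration only. The granularity, chunk-alignment mask, and footer
   pad below are assumed stand-ins, not the real mparams values or the
   real SIX_SIZE_T_SIZES/MMAP_FOOT_PAD macros. */
#include <assert.h>
#include <stdio.h>

#define GRANULARITY ((size_t) 0x10000)   /* assumed mmap granularity */
#define ALIGN_MASK ((size_t) 15)         /* assumed CHUNK_ALIGN_MASK */
#define SIX_SIZES (6 * sizeof(size_t))   /* stand-in SIX_SIZE_T_SIZES */
#define FOOT_PAD (4 * sizeof(size_t))    /* assumed MMAP_FOOT_PAD */

static size_t granularity_align_sketch(size_t n)
{
    return (n + GRANULARITY - 1) & ~(GRANULARITY - 1);
}

int main(void)
{
    size_t nb = 300000;   /* an already-padded request */
    size_t mmsize = granularity_align_sketch(nb + SIX_SIZES + ALIGN_MASK);
    size_t offset = 0;    /* mmap returns page-aligned memory, so often 0 */
    size_t psize = mmsize - offset - FOOT_PAD;
    assert(mmsize > nb);  /* the same wrap-around-zero check as above */
    printf("request %lu -> mapping %lu, chunk size %lu\n",
           (unsigned long) nb, (unsigned long) mmsize,
           (unsigned long) psize);
    return 0;
}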
3249 3246
3250 /* Realloc using mmap */ 3247 /* Realloc using mmap */
3251 static mchunkptr 3248 static mchunkptr
3252 mmap_resize (mstate m, mchunkptr oldp, size_t nb) 3249 mmap_resize(mstate m, mchunkptr oldp, size_t nb)
3253 { 3250 {
3254 size_t oldsize = chunksize (oldp); 3251 size_t oldsize = chunksize(oldp);
3255 if (is_small (nb)) /* Can't shrink mmap regions below small size */ 3252 if (is_small(nb)) /* Can't shrink mmap regions below small size */
3256 return 0; 3253 return 0;
3257 /* Keep old chunk if big enough but not too big */ 3254 /* Keep old chunk if big enough but not too big */
3258 if (oldsize >= nb + SIZE_T_SIZE && 3255 if (oldsize >= nb + SIZE_T_SIZE &&
3259 (oldsize - nb) <= (mparams.granularity << 1)) 3256 (oldsize - nb) <= (mparams.granularity << 1))
3260 return oldp; 3257 return oldp;
3261 else { 3258 else {
3262 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; 3259 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3263 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; 3260 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3264 size_t newmmsize = granularity_align (nb + SIX_SIZE_T_SIZES + 3261 size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3265 CHUNK_ALIGN_MASK); 3262 CHUNK_ALIGN_MASK);
3266 char *cp = (char *) CALL_MREMAP ((char *) oldp - offset, 3263 char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
3267 oldmmsize, newmmsize, 1); 3264 oldmmsize, newmmsize, 1);
3268 if (cp != CMFAIL) { 3265 if (cp != CMFAIL) {
3269 mchunkptr newp = (mchunkptr) (cp + offset); 3266 mchunkptr newp = (mchunkptr) (cp + offset);
3270 size_t psize = newmmsize - offset - MMAP_FOOT_PAD; 3267 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3271 newp->head = (psize | CINUSE_BIT); 3268 newp->head = (psize | CINUSE_BIT);
3272 mark_inuse_foot (m, newp, psize); 3269 mark_inuse_foot(m, newp, psize);
3273 chunk_plus_offset (newp, psize)->head = FENCEPOST_HEAD; 3270 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3274 chunk_plus_offset (newp, psize + SIZE_T_SIZE)->head = 0; 3271 chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
3275 3272
3276 if (cp < m->least_addr) 3273 if (cp < m->least_addr)
3277 m->least_addr = cp; 3274 m->least_addr = cp;
3278 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) 3275 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3279 m->max_footprint = m->footprint; 3276 m->max_footprint = m->footprint;
3280 check_mmapped_chunk (m, newp); 3277 check_mmapped_chunk(m, newp);
3281 return newp; 3278 return newp;
3282 } 3279 }
3283 } 3280 }
3284 return 0; 3281 return 0;
3285 } 3282 }
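/* The keep-or-remap decision in mmap_resize above, as a standalone
   predicate; illustration only, with an assumed granularity value. */
#include <stddef.h>
#include <stdio.h>

#define GRANULARITY ((size_t) 0x10000)  /* assumed mparams.granularity */

static int keep_old_mapping(size_t oldsize, size_t nb)
{
    /* Keep the old mapping when it already fits the request and wastes
       at most two granularity units; otherwise the caller remaps. */
    return oldsize >= nb + sizeof(size_t) &&
        (oldsize - nb) <= (GRANULARITY << 1);
}

int main(void)
{
    printf("%d\n", keep_old_mapping(0x30000, 0x2f000)); /* 1: little slack */
    printf("%d\n", keep_old_mapping(0x90000, 0x10000)); /* 0: remap smaller */
    return 0;
}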
3286 3283
3287 /* -------------------------- mspace management -------------------------- */ 3284 /* -------------------------- mspace management -------------------------- */
3288 3285
3289 /* Initialize top chunk and its size */ 3286 /* Initialize top chunk and its size */
3290 static void 3287 static void
3291 init_top (mstate m, mchunkptr p, size_t psize) 3288 init_top(mstate m, mchunkptr p, size_t psize)
3292 { 3289 {
3293 /* Ensure alignment */ 3290 /* Ensure alignment */
3294 size_t offset = align_offset (chunk2mem (p)); 3291 size_t offset = align_offset(chunk2mem(p));
3295 p = (mchunkptr) ((char *) p + offset); 3292 p = (mchunkptr) ((char *) p + offset);
3296 psize -= offset; 3293 psize -= offset;
3297 3294
3298 m->top = p; 3295 m->top = p;
3299 m->topsize = psize; 3296 m->topsize = psize;
3300 p->head = psize | PINUSE_BIT; 3297 p->head = psize | PINUSE_BIT;
3301 /* set size of fake trailing chunk holding overhead space only once */ 3298 /* set size of fake trailing chunk holding overhead space only once */
3302 chunk_plus_offset (p, psize)->head = TOP_FOOT_SIZE; 3299 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3303 m->trim_check = mparams.trim_threshold; /* reset on each update */ 3300 m->trim_check = mparams.trim_threshold; /* reset on each update */
3304 } 3301 }
3305 3302
3306 /* Initialize bins for a new mstate that is otherwise zeroed out */ 3303 /* Initialize bins for a new mstate that is otherwise zeroed out */
3307 static void 3304 static void
3308 init_bins (mstate m) 3305 init_bins(mstate m)
3309 { 3306 {
3310 /* Establish circular links for smallbins */ 3307 /* Establish circular links for smallbins */
3311 bindex_t i; 3308 bindex_t i;
3312 for (i = 0; i < NSMALLBINS; ++i) { 3309 for (i = 0; i < NSMALLBINS; ++i) {
3313 sbinptr bin = smallbin_at (m, i); 3310 sbinptr bin = smallbin_at(m, i);
3314 bin->fd = bin->bk = bin; 3311 bin->fd = bin->bk = bin;
3315 } 3312 }
3316 } 3313 }
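/* The self-linked sentinel convention that init_bins establishes,
   reduced to a minimal standalone list; illustration only. An empty
   bin points at itself, so insertion needs no null checks. */
#include <assert.h>

struct node { struct node *fd, *bk; };

static void bin_init(struct node *bin)
{
    bin->fd = bin->bk = bin;     /* empty bin == self-linked sentinel */
}

static void bin_push(struct node *bin, struct node *n)
{
    n->fd = bin->fd;             /* link n right after the sentinel */
    n->bk = bin;
    bin->fd->bk = n;
    bin->fd = n;
}

int main(void)
{
    struct node bin, a;
    bin_init(&bin);
    assert(bin.fd == &bin && bin.bk == &bin);
    bin_push(&bin, &a);
    assert(bin.fd == &a && a.fd == &bin && a.bk == &bin);
    return 0;
}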
3317 3314
3318 #if PROCEED_ON_ERROR 3315 #if PROCEED_ON_ERROR
3319 3316
3320 /* default corruption action */ 3317 /* default corruption action */
3321 static void 3318 static void
3322 reset_on_error (mstate m) 3319 reset_on_error(mstate m)
3323 { 3320 {
3324 int i; 3321 int i;
3325 ++malloc_corruption_error_count; 3322 ++malloc_corruption_error_count;
3326 /* Reinitialize fields to forget about all memory */ 3323 /* Reinitialize fields to forget about all memory */
3327 m->smallmap = m->treemap = 0; 3324 m->smallmap = m->treemap = 0;
3329 m->seg.base = 0; 3326 m->seg.base = 0;
3330 m->seg.size = 0; 3327 m->seg.size = 0;
3331 m->seg.next = 0; 3328 m->seg.next = 0;
3332 m->top = m->dv = 0; 3329 m->top = m->dv = 0;
3333 for (i = 0; i < NTREEBINS; ++i) 3330 for (i = 0; i < NTREEBINS; ++i)
3334 *treebin_at (m, i) = 0; 3331 *treebin_at(m, i) = 0;
3335 init_bins (m); 3332 init_bins(m);
3336 } 3333 }
3337 #endif /* PROCEED_ON_ERROR */ 3334 #endif /* PROCEED_ON_ERROR */
3338 3335
3339 /* Allocate chunk and prepend remainder with chunk in successor base. */ 3336 /* Allocate chunk and prepend remainder with chunk in successor base. */
3340 static void * 3337 static void *
3341 prepend_alloc (mstate m, char *newbase, char *oldbase, size_t nb) 3338 prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
3342 { 3339 {
3343 mchunkptr p = align_as_chunk (newbase); 3340 mchunkptr p = align_as_chunk(newbase);
3344 mchunkptr oldfirst = align_as_chunk (oldbase); 3341 mchunkptr oldfirst = align_as_chunk(oldbase);
3345 size_t psize = (char *) oldfirst - (char *) p; 3342 size_t psize = (char *) oldfirst - (char *) p;
3346 mchunkptr q = chunk_plus_offset (p, nb); 3343 mchunkptr q = chunk_plus_offset(p, nb);
3347 size_t qsize = psize - nb; 3344 size_t qsize = psize - nb;
3348 set_size_and_pinuse_of_inuse_chunk (m, p, nb); 3345 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3349 3346
3350 assert ((char *) oldfirst > (char *) q); 3347 assert((char *) oldfirst > (char *) q);
3351 assert (pinuse (oldfirst)); 3348 assert(pinuse(oldfirst));
3352 assert (qsize >= MIN_CHUNK_SIZE); 3349 assert(qsize >= MIN_CHUNK_SIZE);
3353 3350
3354 /* consolidate remainder with first chunk of old base */ 3351 /* consolidate remainder with first chunk of old base */
3355 if (oldfirst == m->top) { 3352 if (oldfirst == m->top) {
3356 size_t tsize = m->topsize += qsize; 3353 size_t tsize = m->topsize += qsize;
3357 m->top = q; 3354 m->top = q;
3358 q->head = tsize | PINUSE_BIT; 3355 q->head = tsize | PINUSE_BIT;
3359 check_top_chunk (m, q); 3356 check_top_chunk(m, q);
3360 } else if (oldfirst == m->dv) { 3357 } else if (oldfirst == m->dv) {
3361 size_t dsize = m->dvsize += qsize; 3358 size_t dsize = m->dvsize += qsize;
3362 m->dv = q; 3359 m->dv = q;
3363 set_size_and_pinuse_of_free_chunk (q, dsize); 3360 set_size_and_pinuse_of_free_chunk(q, dsize);
3364 } else { 3361 } else {
3365 if (!cinuse (oldfirst)) { 3362 if (!cinuse(oldfirst)) {
3366 size_t nsize = chunksize (oldfirst); 3363 size_t nsize = chunksize(oldfirst);
3367 unlink_chunk (m, oldfirst, nsize); 3364 unlink_chunk(m, oldfirst, nsize);
3368 oldfirst = chunk_plus_offset (oldfirst, nsize); 3365 oldfirst = chunk_plus_offset(oldfirst, nsize);
3369 qsize += nsize; 3366 qsize += nsize;
3370 } 3367 }
3371 set_free_with_pinuse (q, qsize, oldfirst); 3368 set_free_with_pinuse(q, qsize, oldfirst);
3372 insert_chunk (m, q, qsize); 3369 insert_chunk(m, q, qsize);
3373 check_free_chunk (m, q); 3370 check_free_chunk(m, q);
3374 } 3371 }
3375 3372
3376 check_malloced_chunk (m, chunk2mem (p), nb); 3373 check_malloced_chunk(m, chunk2mem(p), nb);
3377 return chunk2mem (p); 3374 return chunk2mem(p);
3378 } 3375 }
3379 3376
3380 3377
3381 /* Add a segment to hold a new noncontiguous region */ 3378 /* Add a segment to hold a new noncontiguous region */
3382 static void 3379 static void
3383 add_segment (mstate m, char *tbase, size_t tsize, flag_t mmapped) 3380 add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
3384 { 3381 {
3385 /* Determine locations and sizes of segment, fenceposts, old top */ 3382 /* Determine locations and sizes of segment, fenceposts, old top */
3386 char *old_top = (char *) m->top; 3383 char *old_top = (char *) m->top;
3387 msegmentptr oldsp = segment_holding (m, old_top); 3384 msegmentptr oldsp = segment_holding(m, old_top);
3388 char *old_end = oldsp->base + oldsp->size; 3385 char *old_end = oldsp->base + oldsp->size;
3389 size_t ssize = pad_request (sizeof (struct malloc_segment)); 3386 size_t ssize = pad_request(sizeof(struct malloc_segment));
3390 char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); 3387 char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3391 size_t offset = align_offset (chunk2mem (rawsp)); 3388 size_t offset = align_offset(chunk2mem(rawsp));
3392 char *asp = rawsp + offset; 3389 char *asp = rawsp + offset;
3393 char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp; 3390 char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3394 mchunkptr sp = (mchunkptr) csp; 3391 mchunkptr sp = (mchunkptr) csp;
3395 msegmentptr ss = (msegmentptr) (chunk2mem (sp)); 3392 msegmentptr ss = (msegmentptr) (chunk2mem(sp));
3396 mchunkptr tnext = chunk_plus_offset (sp, ssize); 3393 mchunkptr tnext = chunk_plus_offset(sp, ssize);
3397 mchunkptr p = tnext; 3394 mchunkptr p = tnext;
3398 int nfences = 0; 3395 int nfences = 0;
3399 3396
3400 /* reset top to new space */ 3397 /* reset top to new space */
3401 init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE); 3398 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3402 3399
3403 /* Set up segment record */ 3400 /* Set up segment record */
3404 assert (is_aligned (ss)); 3401 assert(is_aligned(ss));
3405 set_size_and_pinuse_of_inuse_chunk (m, sp, ssize); 3402 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3406 *ss = m->seg; /* Push current record */ 3403 *ss = m->seg; /* Push current record */
3407 m->seg.base = tbase; 3404 m->seg.base = tbase;
3408 m->seg.size = tsize; 3405 m->seg.size = tsize;
3409 m->seg.sflags = mmapped; 3406 m->seg.sflags = mmapped;
3410 m->seg.next = ss; 3407 m->seg.next = ss;
3411 3408
3412 /* Insert trailing fenceposts */ 3409 /* Insert trailing fenceposts */
3413 for (;;) { 3410 for (;;) {
3414 mchunkptr nextp = chunk_plus_offset (p, SIZE_T_SIZE); 3411 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3415 p->head = FENCEPOST_HEAD; 3412 p->head = FENCEPOST_HEAD;
3416 ++nfences; 3413 ++nfences;
3417 if ((char *) (&(nextp->head)) < old_end) 3414 if ((char *) (&(nextp->head)) < old_end)
3418 p = nextp; 3415 p = nextp;
3419 else 3416 else
3420 break; 3417 break;
3421 } 3418 }
3422 assert (nfences >= 2); 3419 assert(nfences >= 2);
3423 3420
3424 /* Insert the rest of old top into a bin as an ordinary free chunk */ 3421 /* Insert the rest of old top into a bin as an ordinary free chunk */
3425 if (csp != old_top) { 3422 if (csp != old_top) {
3426 mchunkptr q = (mchunkptr) old_top; 3423 mchunkptr q = (mchunkptr) old_top;
3427 size_t psize = csp - old_top; 3424 size_t psize = csp - old_top;
3428 mchunkptr tn = chunk_plus_offset (q, psize); 3425 mchunkptr tn = chunk_plus_offset(q, psize);
3429 set_free_with_pinuse (q, psize, tn); 3426 set_free_with_pinuse(q, psize, tn);
3430 insert_chunk (m, q, psize); 3427 insert_chunk(m, q, psize);
3431 } 3428 }
3432 3429
3433 check_top_chunk (m, m->top); 3430 check_top_chunk(m, m->top);
3434 } 3431 }
3435 3432
3436 /* -------------------------- System allocation -------------------------- */ 3433 /* -------------------------- System allocation -------------------------- */
3437 3434
3438 /* Get memory from system using MORECORE or MMAP */ 3435 /* Get memory from system using MORECORE or MMAP */
3439 static void * 3436 static void *
3440 sys_alloc (mstate m, size_t nb) 3437 sys_alloc(mstate m, size_t nb)
3441 { 3438 {
3442 char *tbase = CMFAIL; 3439 char *tbase = CMFAIL;
3443 size_t tsize = 0; 3440 size_t tsize = 0;
3444 flag_t mmap_flag = 0; 3441 flag_t mmap_flag = 0;
3445 3442
3446 init_mparams (); 3443 init_mparams();
3447 3444
3448 /* Directly map large chunks */ 3445 /* Directly map large chunks */
3449 if (use_mmap (m) && nb >= mparams.mmap_threshold) { 3446 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3450 void *mem = mmap_alloc (m, nb); 3447 void *mem = mmap_alloc(m, nb);
3451 if (mem != 0) 3448 if (mem != 0)
3452 return mem; 3449 return mem;
3453 } 3450 }
3454 3451
3455 /* 3452 /*
3467 find space. 3464 find space.
3468 3. A call to MORECORE that usually cannot contiguously extend memory. 3465 3. A call to MORECORE that usually cannot contiguously extend memory.
3469 (disabled if not HAVE_MORECORE) 3466 (disabled if not HAVE_MORECORE)
3470 */ 3467 */
3471 3468
3472 if (MORECORE_CONTIGUOUS && !use_noncontiguous (m)) { 3469 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3473 char *br = CMFAIL; 3470 char *br = CMFAIL;
3474 msegmentptr ss = 3471 msegmentptr ss =
3475 (m->top == 0) ? 0 : segment_holding (m, (char *) m->top); 3472 (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
3476 size_t asize = 0; 3473 size_t asize = 0;
3477 ACQUIRE_MORECORE_LOCK (); 3474 ACQUIRE_MORECORE_LOCK();
3478 3475
3479 if (ss == 0) { /* First time through or recovery */ 3476 if (ss == 0) { /* First time through or recovery */
3480 char *base = (char *) CALL_MORECORE (0); 3477 char *base = (char *) CALL_MORECORE(0);
3481 if (base != CMFAIL) { 3478 if (base != CMFAIL) {
3482 asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE); 3479 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3483 /* Adjust to end on a page boundary */ 3480 /* Adjust to end on a page boundary */
3484 if (!is_page_aligned (base)) 3481 if (!is_page_aligned(base))
3485 asize += (page_align ((size_t) base) - (size_t) base); 3482 asize += (page_align((size_t) base) - (size_t) base);
3486 /* Can't call MORECORE if size is negative when treated as signed */ 3483 /* Can't call MORECORE if size is negative when treated as signed */
3487 if (asize < HALF_MAX_SIZE_T && 3484 if (asize < HALF_MAX_SIZE_T &&
3488 (br = (char *) (CALL_MORECORE (asize))) == base) { 3485 (br = (char *) (CALL_MORECORE(asize))) == base) {
3489 tbase = base; 3486 tbase = base;
3490 tsize = asize; 3487 tsize = asize;
3491 } 3488 }
3492 } 3489 }
3493 } else { 3490 } else {
3494 /* Subtract out existing available top space from MORECORE request. */ 3491 /* Subtract out existing available top space from MORECORE request. */
3495 asize = 3492 asize =
3496 granularity_align (nb - m->topsize + TOP_FOOT_SIZE + 3493 granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
3497 SIZE_T_ONE); 3494 SIZE_T_ONE);
3498 /* Use mem here only if it did contiguously extend old space */ 3495 /* Use mem here only if it did contiguously extend old space */
3499 if (asize < HALF_MAX_SIZE_T && 3496 if (asize < HALF_MAX_SIZE_T &&
3500 (br = 3497 (br =
3501 (char *) (CALL_MORECORE (asize))) == ss->base + ss->size) { 3498 (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
3502 tbase = br; 3499 tbase = br;
3503 tsize = asize; 3500 tsize = asize;
3504 } 3501 }
3505 } 3502 }
3506 3503
3507 if (tbase == CMFAIL) { /* Cope with partial failure */ 3504 if (tbase == CMFAIL) { /* Cope with partial failure */
3508 if (br != CMFAIL) { /* Try to use/extend the space we did get */ 3505 if (br != CMFAIL) { /* Try to use/extend the space we did get */
3509 if (asize < HALF_MAX_SIZE_T && 3506 if (asize < HALF_MAX_SIZE_T &&
3510 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { 3507 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3511 size_t esize = 3508 size_t esize =
3512 granularity_align (nb + TOP_FOOT_SIZE + 3509 granularity_align(nb + TOP_FOOT_SIZE +
3513 SIZE_T_ONE - asize); 3510 SIZE_T_ONE - asize);
3514 if (esize < HALF_MAX_SIZE_T) { 3511 if (esize < HALF_MAX_SIZE_T) {
3515 char *end = (char *) CALL_MORECORE (esize); 3512 char *end = (char *) CALL_MORECORE(esize);
3516 if (end != CMFAIL) 3513 if (end != CMFAIL)
3517 asize += esize; 3514 asize += esize;
3518 else { /* Can't use; try to release */ 3515 else { /* Can't use; try to release */
3519 end = (char *) CALL_MORECORE (-asize); 3516 end = (char *) CALL_MORECORE(-asize);
3520 br = CMFAIL; 3517 br = CMFAIL;
3521 } 3518 }
3522 } 3519 }
3523 } 3520 }
3524 } 3521 }
3525 if (br != CMFAIL) { /* Use the space we did get */ 3522 if (br != CMFAIL) { /* Use the space we did get */
3526 tbase = br; 3523 tbase = br;
3527 tsize = asize; 3524 tsize = asize;
3528 } else 3525 } else
3529 disable_contiguous (m); /* Don't try contiguous path in the future */ 3526 disable_contiguous(m); /* Don't try contiguous path in the future */
3530 } 3527 }
3531 3528
3532 RELEASE_MORECORE_LOCK (); 3529 RELEASE_MORECORE_LOCK();
3533 } 3530 }
3534 3531
3535 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ 3532 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3536 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; 3533 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
3537 size_t rsize = granularity_align (req); 3534 size_t rsize = granularity_align(req);
3538 if (rsize > nb) { /* Fail if wraps around zero */ 3535 if (rsize > nb) { /* Fail if wraps around zero */
3539 char *mp = (char *) (CALL_MMAP (rsize)); 3536 char *mp = (char *) (CALL_MMAP(rsize));
3540 if (mp != CMFAIL) { 3537 if (mp != CMFAIL) {
3541 tbase = mp; 3538 tbase = mp;
3542 tsize = rsize; 3539 tsize = rsize;
3543 mmap_flag = IS_MMAPPED_BIT; 3540 mmap_flag = IS_MMAPPED_BIT;
3544 } 3541 }
3545 } 3542 }
3546 } 3543 }
3547 3544
3548 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ 3545 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3549 size_t asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE); 3546 size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3550 if (asize < HALF_MAX_SIZE_T) { 3547 if (asize < HALF_MAX_SIZE_T) {
3551 char *br = CMFAIL; 3548 char *br = CMFAIL;
3552 char *end = CMFAIL; 3549 char *end = CMFAIL;
3553 ACQUIRE_MORECORE_LOCK (); 3550 ACQUIRE_MORECORE_LOCK();
3554 br = (char *) (CALL_MORECORE (asize)); 3551 br = (char *) (CALL_MORECORE(asize));
3555 end = (char *) (CALL_MORECORE (0)); 3552 end = (char *) (CALL_MORECORE(0));
3556 RELEASE_MORECORE_LOCK (); 3553 RELEASE_MORECORE_LOCK();
3557 if (br != CMFAIL && end != CMFAIL && br < end) { 3554 if (br != CMFAIL && end != CMFAIL && br < end) {
3558 size_t ssize = end - br; 3555 size_t ssize = end - br;
3559 if (ssize > nb + TOP_FOOT_SIZE) { 3556 if (ssize > nb + TOP_FOOT_SIZE) {
3560 tbase = br; 3557 tbase = br;
3561 tsize = ssize; 3558 tsize = ssize;
3567 if (tbase != CMFAIL) { 3564 if (tbase != CMFAIL) {
3568 3565
3569 if ((m->footprint += tsize) > m->max_footprint) 3566 if ((m->footprint += tsize) > m->max_footprint)
3570 m->max_footprint = m->footprint; 3567 m->max_footprint = m->footprint;
3571 3568
3572 if (!is_initialized (m)) { /* first-time initialization */ 3569 if (!is_initialized(m)) { /* first-time initialization */
3573 m->seg.base = m->least_addr = tbase; 3570 m->seg.base = m->least_addr = tbase;
3574 m->seg.size = tsize; 3571 m->seg.size = tsize;
3575 m->seg.sflags = mmap_flag; 3572 m->seg.sflags = mmap_flag;
3576 m->magic = mparams.magic; 3573 m->magic = mparams.magic;
3577 init_bins (m); 3574 init_bins(m);
3578 if (is_global (m)) 3575 if (is_global(m))
3579 init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE); 3576 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3580 else { 3577 else {
3581 /* Offset top by embedded malloc_state */ 3578 /* Offset top by embedded malloc_state */
3582 mchunkptr mn = next_chunk (mem2chunk (m)); 3579 mchunkptr mn = next_chunk(mem2chunk(m));
3583 init_top (m, mn, 3580 init_top(m, mn,
3584 (size_t) ((tbase + tsize) - (char *) mn) - 3581 (size_t) ((tbase + tsize) - (char *) mn) -
3585 TOP_FOOT_SIZE); 3582 TOP_FOOT_SIZE);
3586 } 3583 }
3587 } 3584 }
3588 3585
3589 else { 3586 else {
3590 /* Try to merge with an existing segment */ 3587 /* Try to merge with an existing segment */
3591 msegmentptr sp = &m->seg; 3588 msegmentptr sp = &m->seg;
3592 while (sp != 0 && tbase != sp->base + sp->size) 3589 while (sp != 0 && tbase != sp->base + sp->size)
3593 sp = sp->next; 3590 sp = sp->next;
3594 if (sp != 0 && !is_extern_segment (sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds (sp, m->top)) { /* append */ 3591 if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
3595 sp->size += tsize; 3592 sp->size += tsize;
3596 init_top (m, m->top, m->topsize + tsize); 3593 init_top(m, m->top, m->topsize + tsize);
3597 } else { 3594 } else {
3598 if (tbase < m->least_addr) 3595 if (tbase < m->least_addr)
3599 m->least_addr = tbase; 3596 m->least_addr = tbase;
3600 sp = &m->seg; 3597 sp = &m->seg;
3601 while (sp != 0 && sp->base != tbase + tsize) 3598 while (sp != 0 && sp->base != tbase + tsize)
3602 sp = sp->next; 3599 sp = sp->next;
3603 if (sp != 0 && 3600 if (sp != 0 &&
3604 !is_extern_segment (sp) && 3601 !is_extern_segment(sp) &&
3605 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { 3602 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3606 char *oldbase = sp->base; 3603 char *oldbase = sp->base;
3607 sp->base = tbase; 3604 sp->base = tbase;
3608 sp->size += tsize; 3605 sp->size += tsize;
3609 return prepend_alloc (m, tbase, oldbase, nb); 3606 return prepend_alloc(m, tbase, oldbase, nb);
3610 } else 3607 } else
3611 add_segment (m, tbase, tsize, mmap_flag); 3608 add_segment(m, tbase, tsize, mmap_flag);
3612 } 3609 }
3613 } 3610 }
3614 3611
3615 if (nb < m->topsize) { /* Allocate from new or extended top space */ 3612 if (nb < m->topsize) { /* Allocate from new or extended top space */
3616 size_t rsize = m->topsize -= nb; 3613 size_t rsize = m->topsize -= nb;
3617 mchunkptr p = m->top; 3614 mchunkptr p = m->top;
3618 mchunkptr r = m->top = chunk_plus_offset (p, nb); 3615 mchunkptr r = m->top = chunk_plus_offset(p, nb);
3619 r->head = rsize | PINUSE_BIT; 3616 r->head = rsize | PINUSE_BIT;
3620 set_size_and_pinuse_of_inuse_chunk (m, p, nb); 3617 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3621 check_top_chunk (m, m->top); 3618 check_top_chunk(m, m->top);
3622 check_malloced_chunk (m, chunk2mem (p), nb); 3619 check_malloced_chunk(m, chunk2mem(p), nb);
3623 return chunk2mem (p); 3620 return chunk2mem(p);
3624 } 3621 }
3625 } 3622 }
3626 3623
3627 MALLOC_FAILURE_ACTION; 3624 MALLOC_FAILURE_ACTION;
3628 return 0; 3625 return 0;
3630 3627
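/* How sys_alloc above sizes a contiguous MORECORE request when a top
   segment already exists: it asks only for the shortfall, rounded up to
   the allocation granularity. Standalone illustration with stand-in
   values for mparams.granularity and TOP_FOOT_SIZE. */
#include <stdio.h>

#define GRANULARITY ((size_t) 0x10000)  /* assumed granularity */
#define TOP_FOOT (5 * sizeof(size_t))   /* assumed TOP_FOOT_SIZE */

static size_t granularity_align_sketch(size_t n)
{
    return (n + GRANULARITY - 1) & ~(GRANULARITY - 1);
}

int main(void)
{
    size_t nb = 0x25000;      /* padded request */
    size_t topsize = 0x3000;  /* space already available in top */
    size_t asize = granularity_align_sketch(nb - topsize + TOP_FOOT + 1);
    printf("need %lx, top has %lx -> MORECORE request %lx\n",
           (unsigned long) nb, (unsigned long) topsize,
           (unsigned long) asize);
    return 0;
}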
3631 /* ----------------------- system deallocation -------------------------- */ 3628 /* ----------------------- system deallocation -------------------------- */
3632 3629
3633 /* Unmap and unlink any mmapped segments that don't contain used chunks */ 3630 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3634 static size_t 3631 static size_t
3635 release_unused_segments (mstate m) 3632 release_unused_segments(mstate m)
3636 { 3633 {
3637 size_t released = 0; 3634 size_t released = 0;
3638 msegmentptr pred = &m->seg; 3635 msegmentptr pred = &m->seg;
3639 msegmentptr sp = pred->next; 3636 msegmentptr sp = pred->next;
3640 while (sp != 0) { 3637 while (sp != 0) {
3641 char *base = sp->base; 3638 char *base = sp->base;
3642 size_t size = sp->size; 3639 size_t size = sp->size;
3643 msegmentptr next = sp->next; 3640 msegmentptr next = sp->next;
3644 if (is_mmapped_segment (sp) && !is_extern_segment (sp)) { 3641 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3645 mchunkptr p = align_as_chunk (base); 3642 mchunkptr p = align_as_chunk(base);
3646 size_t psize = chunksize (p); 3643 size_t psize = chunksize(p);
3647 /* Can unmap if first chunk holds entire segment and not pinned */ 3644 /* Can unmap if first chunk holds entire segment and not pinned */
3648 if (!cinuse (p) 3645 if (!cinuse(p)
3649 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) { 3646 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3650 tchunkptr tp = (tchunkptr) p; 3647 tchunkptr tp = (tchunkptr) p;
3651 assert (segment_holds (sp, (char *) sp)); 3648 assert(segment_holds(sp, (char *) sp));
3652 if (p == m->dv) { 3649 if (p == m->dv) {
3653 m->dv = 0; 3650 m->dv = 0;
3654 m->dvsize = 0; 3651 m->dvsize = 0;
3655 } else { 3652 } else {
3656 unlink_large_chunk (m, tp); 3653 unlink_large_chunk(m, tp);
3657 } 3654 }
3658 if (CALL_MUNMAP (base, size) == 0) { 3655 if (CALL_MUNMAP(base, size) == 0) {
3659 released += size; 3656 released += size;
3660 m->footprint -= size; 3657 m->footprint -= size;
3661 /* unlink obsoleted record */ 3658 /* unlink obsoleted record */
3662 sp = pred; 3659 sp = pred;
3663 sp->next = next; 3660 sp->next = next;
3664 } else { /* back out if cannot unmap */ 3661 } else { /* back out if cannot unmap */
3665 insert_large_chunk (m, tp, psize); 3662 insert_large_chunk(m, tp, psize);
3666 } 3663 }
3667 } 3664 }
3668 } 3665 }
3669 pred = sp; 3666 pred = sp;
3670 sp = next; 3667 sp = next;
3671 } 3668 }
3672 return released; 3669 return released;
3673 } 3670 }
3674 3671
3675 static int 3672 static int
3676 sys_trim (mstate m, size_t pad) 3673 sys_trim(mstate m, size_t pad)
3677 { 3674 {
3678 size_t released = 0; 3675 size_t released = 0;
3679 if (pad < MAX_REQUEST && is_initialized (m)) { 3676 if (pad < MAX_REQUEST && is_initialized(m)) {
3680 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ 3677 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3681 3678
3682 if (m->topsize > pad) { 3679 if (m->topsize > pad) {
3683 /* Shrink top space in granularity-size units, keeping at least one */ 3680 /* Shrink top space in granularity-size units, keeping at least one */
3684 size_t unit = mparams.granularity; 3681 size_t unit = mparams.granularity;
3685 size_t extra = 3682 size_t extra =
3686 ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - 3683 ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3687 SIZE_T_ONE) * unit; 3684 SIZE_T_ONE) * unit;
3688 msegmentptr sp = segment_holding (m, (char *) m->top); 3685 msegmentptr sp = segment_holding(m, (char *) m->top);
3689 3686
3690 if (!is_extern_segment (sp)) { 3687 if (!is_extern_segment(sp)) {
3691 if (is_mmapped_segment (sp)) { 3688 if (is_mmapped_segment(sp)) {
3692 if (HAVE_MMAP && sp->size >= extra && !has_segment_link (m, sp)) { /* can't shrink if pinned */ 3689 if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
3693 size_t newsize = sp->size - extra; 3690 size_t newsize = sp->size - extra;
3694 /* Prefer mremap, fall back to munmap */ 3691 /* Prefer mremap, fall back to munmap */
3695 if ((CALL_MREMAP 3692 if ((CALL_MREMAP
3696 (sp->base, sp->size, newsize, 0) != MFAIL) 3693 (sp->base, sp->size, newsize, 0) != MFAIL)
3697 || (CALL_MUNMAP (sp->base + newsize, extra) 3694 || (CALL_MUNMAP(sp->base + newsize, extra)
3698 == 0)) { 3695 == 0)) {
3699 released = extra; 3696 released = extra;
3700 } 3697 }
3701 } 3698 }
3702 } else if (HAVE_MORECORE) { 3699 } else if (HAVE_MORECORE) {
3703 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ 3700 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3704 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; 3701 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3705 ACQUIRE_MORECORE_LOCK (); 3702 ACQUIRE_MORECORE_LOCK();
3706 { 3703 {
3707 /* Make sure end of memory is where we last set it. */ 3704 /* Make sure end of memory is where we last set it. */
3708 char *old_br = (char *) (CALL_MORECORE (0)); 3705 char *old_br = (char *) (CALL_MORECORE(0));
3709 if (old_br == sp->base + sp->size) { 3706 if (old_br == sp->base + sp->size) {
3710 char *rel_br = (char *) (CALL_MORECORE (-extra)); 3707 char *rel_br = (char *) (CALL_MORECORE(-extra));
3711 char *new_br = (char *) (CALL_MORECORE (0)); 3708 char *new_br = (char *) (CALL_MORECORE(0));
3712 if (rel_br != CMFAIL && new_br < old_br) 3709 if (rel_br != CMFAIL && new_br < old_br)
3713 released = old_br - new_br; 3710 released = old_br - new_br;
3714 } 3711 }
3715 } 3712 }
3716 RELEASE_MORECORE_LOCK (); 3713 RELEASE_MORECORE_LOCK();
3717 } 3714 }
3718 } 3715 }
3719 3716
3720 if (released != 0) { 3717 if (released != 0) {
3721 sp->size -= released; 3718 sp->size -= released;
3722 m->footprint -= released; 3719 m->footprint -= released;
3723 init_top (m, m->top, m->topsize - released); 3720 init_top(m, m->top, m->topsize - released);
3724 check_top_chunk (m, m->top); 3721 check_top_chunk(m, m->top);
3725 } 3722 }
3726 } 3723 }
3727 3724
3728 /* Unmap any unused mmapped segments */ 3725 /* Unmap any unused mmapped segments */
3729 if (HAVE_MMAP) 3726 if (HAVE_MMAP)
3730 released += release_unused_segments (m); 3727 released += release_unused_segments(m);
3731 3728
3732 /* On failure, disable autotrim to avoid repeated failed future calls */ 3729 /* On failure, disable autotrim to avoid repeated failed future calls */
3733 if (released == 0) 3730 if (released == 0)
3734 m->trim_check = MAX_SIZE_T; 3731 m->trim_check = MAX_SIZE_T;
3735 } 3732 }
3739 3736
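/* sys_trim's "extra" computation above, standalone: it releases whole
   granularity units from the top chunk, rounding the shrink down so
   that pad plus part of one unit always remains in top. Illustration
   only; the unit value is assumed. */
#include <stdio.h>

#define UNIT ((size_t) 0x10000)  /* assumed mparams.granularity */

int main(void)
{
    size_t topsize = 0x48000, pad = 0x1000;
    size_t extra = ((topsize - pad + (UNIT - 1)) / UNIT - 1) * UNIT;
    printf("top %lx, pad %lx -> release %lx, keep %lx\n",
           (unsigned long) topsize, (unsigned long) pad,
           (unsigned long) extra, (unsigned long) (topsize - extra));
    return 0;
}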
3740 /* ---------------------------- malloc support --------------------------- */ 3737 /* ---------------------------- malloc support --------------------------- */
3741 3738
3742 /* allocate a large request from the best fitting chunk in a treebin */ 3739 /* allocate a large request from the best fitting chunk in a treebin */
3743 static void * 3740 static void *
3744 tmalloc_large (mstate m, size_t nb) 3741 tmalloc_large(mstate m, size_t nb)
3745 { 3742 {
3746 tchunkptr v = 0; 3743 tchunkptr v = 0;
3747 size_t rsize = -nb; /* Unsigned negation */ 3744 size_t rsize = -nb; /* Unsigned negation */
3748 tchunkptr t; 3745 tchunkptr t;
3749 bindex_t idx; 3746 bindex_t idx;
3750 compute_tree_index (nb, idx); 3747 compute_tree_index(nb, idx);
3751 3748
3752 if ((t = *treebin_at (m, idx)) != 0) { 3749 if ((t = *treebin_at(m, idx)) != 0) {
3753 /* Traverse tree for this bin looking for node with size == nb */ 3750 /* Traverse tree for this bin looking for node with size == nb */
3754 size_t sizebits = nb << leftshift_for_tree_index (idx); 3751 size_t sizebits = nb << leftshift_for_tree_index(idx);
3755 tchunkptr rst = 0; /* The deepest untaken right subtree */ 3752 tchunkptr rst = 0; /* The deepest untaken right subtree */
3756 for (;;) { 3753 for (;;) {
3757 tchunkptr rt; 3754 tchunkptr rt;
3758 size_t trem = chunksize (t) - nb; 3755 size_t trem = chunksize(t) - nb;
3759 if (trem < rsize) { 3756 if (trem < rsize) {
3760 v = t; 3757 v = t;
3761 if ((rsize = trem) == 0) 3758 if ((rsize = trem) == 0)
3762 break; 3759 break;
3763 } 3760 }
3772 sizebits <<= 1; 3769 sizebits <<= 1;
3773 } 3770 }
3774 } 3771 }
3775 3772
3776 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ 3773 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3777 binmap_t leftbits = left_bits (idx2bit (idx)) & m->treemap; 3774 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3778 if (leftbits != 0) { 3775 if (leftbits != 0) {
3779 bindex_t i; 3776 bindex_t i;
3780 binmap_t leastbit = least_bit (leftbits); 3777 binmap_t leastbit = least_bit(leftbits);
3781 compute_bit2idx (leastbit, i); 3778 compute_bit2idx(leastbit, i);
3782 t = *treebin_at (m, i); 3779 t = *treebin_at(m, i);
3783 } 3780 }
3784 } 3781 }
3785 3782
3786 while (t != 0) { /* find smallest of tree or subtree */ 3783 while (t != 0) { /* find smallest of tree or subtree */
3787 size_t trem = chunksize (t) - nb; 3784 size_t trem = chunksize(t) - nb;
3788 if (trem < rsize) { 3785 if (trem < rsize) {
3789 rsize = trem; 3786 rsize = trem;
3790 v = t; 3787 v = t;
3791 } 3788 }
3792 t = leftmost_child (t); 3789 t = leftmost_child(t);
3793 } 3790 }
3794 3791
3795 /* If dv is a better fit, return 0 so malloc will use it */ 3792 /* If dv is a better fit, return 0 so malloc will use it */
3796 if (v != 0 && rsize < (size_t) (m->dvsize - nb)) { 3793 if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3797 if (RTCHECK (ok_address (m, v))) { /* split */ 3794 if (RTCHECK(ok_address(m, v))) { /* split */
3798 mchunkptr r = chunk_plus_offset (v, nb); 3795 mchunkptr r = chunk_plus_offset(v, nb);
3799 assert (chunksize (v) == rsize + nb); 3796 assert(chunksize(v) == rsize + nb);
3800 if (RTCHECK (ok_next (v, r))) { 3797 if (RTCHECK(ok_next(v, r))) {
3801 unlink_large_chunk (m, v); 3798 unlink_large_chunk(m, v);
3802 if (rsize < MIN_CHUNK_SIZE) 3799 if (rsize < MIN_CHUNK_SIZE)
3803 set_inuse_and_pinuse (m, v, (rsize + nb)); 3800 set_inuse_and_pinuse(m, v, (rsize + nb));
3804 else { 3801 else {
3805 set_size_and_pinuse_of_inuse_chunk (m, v, nb); 3802 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3806 set_size_and_pinuse_of_free_chunk (r, rsize); 3803 set_size_and_pinuse_of_free_chunk(r, rsize);
3807 insert_chunk (m, r, rsize); 3804 insert_chunk(m, r, rsize);
3808 } 3805 }
3809 return chunk2mem (v); 3806 return chunk2mem(v);
3810 } 3807 }
3811 } 3808 }
3812 CORRUPTION_ERROR_ACTION (m); 3809 CORRUPTION_ERROR_ACTION(m);
3813 } 3810 }
3814 return 0; 3811 return 0;
3815 } 3812 }
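/* The "unsigned negation" initializer used by tmalloc_large above,
   standalone: (size_t) -nb is strictly larger than any achievable
   leftover chunksize(t) - nb, so the first candidate examined always
   becomes the current best fit. Illustration only. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t sizes[] = { 2048, 1024, 1500 };  /* candidate chunk sizes */
    size_t nb = 1000;
    size_t rsize = (size_t) 0 - nb;         /* "infinite" starting remainder */
    size_t best = 0, i;
    for (i = 0; i < 3; ++i) {
        if (sizes[i] >= nb && sizes[i] - nb < rsize) {
            rsize = sizes[i] - nb;          /* smaller leftover == better fit */
            best = sizes[i];
        }
    }
    assert(best == 1024 && rsize == 24);    /* best fit leaves 24 bytes */
    return 0;
}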
3816 3813
3817 /* allocate a small request from the best fitting chunk in a treebin */ 3814 /* allocate a small request from the best fitting chunk in a treebin */
3818 static void * 3815 static void *
3819 tmalloc_small (mstate m, size_t nb) 3816 tmalloc_small(mstate m, size_t nb)
3820 { 3817 {
3821 tchunkptr t, v; 3818 tchunkptr t, v;
3822 size_t rsize; 3819 size_t rsize;
3823 bindex_t i; 3820 bindex_t i;
3824 binmap_t leastbit = least_bit (m->treemap); 3821 binmap_t leastbit = least_bit(m->treemap);
3825 compute_bit2idx (leastbit, i); 3822 compute_bit2idx(leastbit, i);
3826 3823
3827 v = t = *treebin_at (m, i); 3824 v = t = *treebin_at(m, i);
3828 rsize = chunksize (t) - nb; 3825 rsize = chunksize(t) - nb;
3829 3826
3830 while ((t = leftmost_child (t)) != 0) { 3827 while ((t = leftmost_child(t)) != 0) {
3831 size_t trem = chunksize (t) - nb; 3828 size_t trem = chunksize(t) - nb;
3832 if (trem < rsize) { 3829 if (trem < rsize) {
3833 rsize = trem; 3830 rsize = trem;
3834 v = t; 3831 v = t;
3835 } 3832 }
3836 } 3833 }
3837 3834
3838 if (RTCHECK (ok_address (m, v))) { 3835 if (RTCHECK(ok_address(m, v))) {
3839 mchunkptr r = chunk_plus_offset (v, nb); 3836 mchunkptr r = chunk_plus_offset(v, nb);
3840 assert (chunksize (v) == rsize + nb); 3837 assert(chunksize(v) == rsize + nb);
3841 if (RTCHECK (ok_next (v, r))) { 3838 if (RTCHECK(ok_next(v, r))) {
3842 unlink_large_chunk (m, v); 3839 unlink_large_chunk(m, v);
3843 if (rsize < MIN_CHUNK_SIZE) 3840 if (rsize < MIN_CHUNK_SIZE)
3844 set_inuse_and_pinuse (m, v, (rsize + nb)); 3841 set_inuse_and_pinuse(m, v, (rsize + nb));
3845 else { 3842 else {
3846 set_size_and_pinuse_of_inuse_chunk (m, v, nb); 3843 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3847 set_size_and_pinuse_of_free_chunk (r, rsize); 3844 set_size_and_pinuse_of_free_chunk(r, rsize);
3848 replace_dv (m, r, rsize); 3845 replace_dv(m, r, rsize);
3849 } 3846 }
3850 return chunk2mem (v); 3847 return chunk2mem(v);
3851 } 3848 }
3852 } 3849 }
3853 3850
3854 CORRUPTION_ERROR_ACTION (m); 3851 CORRUPTION_ERROR_ACTION(m);
3855 return 0; 3852 return 0;
3856 } 3853 }
3857 3854
3858 /* --------------------------- realloc support --------------------------- */ 3855 /* --------------------------- realloc support --------------------------- */
3859 3856
3860 static void * 3857 static void *
3861 internal_realloc (mstate m, void *oldmem, size_t bytes) 3858 internal_realloc(mstate m, void *oldmem, size_t bytes)
3862 { 3859 {
3863 if (bytes >= MAX_REQUEST) { 3860 if (bytes >= MAX_REQUEST) {
3864 MALLOC_FAILURE_ACTION; 3861 MALLOC_FAILURE_ACTION;
3865 return 0; 3862 return 0;
3866 } 3863 }
3867 if (!PREACTION (m)) { 3864 if (!PREACTION(m)) {
3868 mchunkptr oldp = mem2chunk (oldmem); 3865 mchunkptr oldp = mem2chunk(oldmem);
3869 size_t oldsize = chunksize (oldp); 3866 size_t oldsize = chunksize(oldp);
3870 mchunkptr next = chunk_plus_offset (oldp, oldsize); 3867 mchunkptr next = chunk_plus_offset(oldp, oldsize);
3871 mchunkptr newp = 0; 3868 mchunkptr newp = 0;
3872 void *extra = 0; 3869 void *extra = 0;
3873 3870
3874 /* Try to either shrink or extend into top. Else malloc-copy-free */ 3871 /* Try to either shrink or extend into top. Else malloc-copy-free */
3875 3872
3876 if (RTCHECK (ok_address (m, oldp) && ok_cinuse (oldp) && 3873 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
3877 ok_next (oldp, next) && ok_pinuse (next))) { 3874 ok_next(oldp, next) && ok_pinuse(next))) {
3878 size_t nb = request2size (bytes); 3875 size_t nb = request2size(bytes);
3879 if (is_mmapped (oldp)) 3876 if (is_mmapped(oldp))
3880 newp = mmap_resize (m, oldp, nb); 3877 newp = mmap_resize(m, oldp, nb);
3881 else if (oldsize >= nb) { /* already big enough */ 3878 else if (oldsize >= nb) { /* already big enough */
3882 size_t rsize = oldsize - nb; 3879 size_t rsize = oldsize - nb;
3883 newp = oldp; 3880 newp = oldp;
3884 if (rsize >= MIN_CHUNK_SIZE) { 3881 if (rsize >= MIN_CHUNK_SIZE) {
3885 mchunkptr remainder = chunk_plus_offset (newp, nb); 3882 mchunkptr remainder = chunk_plus_offset(newp, nb);
3886 set_inuse (m, newp, nb); 3883 set_inuse(m, newp, nb);
3887 set_inuse (m, remainder, rsize); 3884 set_inuse(m, remainder, rsize);
3888 extra = chunk2mem (remainder); 3885 extra = chunk2mem(remainder);
3889 } 3886 }
3890 } else if (next == m->top && oldsize + m->topsize > nb) { 3887 } else if (next == m->top && oldsize + m->topsize > nb) {
3891 /* Expand into top */ 3888 /* Expand into top */
3892 size_t newsize = oldsize + m->topsize; 3889 size_t newsize = oldsize + m->topsize;
3893 size_t newtopsize = newsize - nb; 3890 size_t newtopsize = newsize - nb;
3894 mchunkptr newtop = chunk_plus_offset (oldp, nb); 3891 mchunkptr newtop = chunk_plus_offset(oldp, nb);
3895 set_inuse (m, oldp, nb); 3892 set_inuse(m, oldp, nb);
3896 newtop->head = newtopsize | PINUSE_BIT; 3893 newtop->head = newtopsize | PINUSE_BIT;
3897 m->top = newtop; 3894 m->top = newtop;
3898 m->topsize = newtopsize; 3895 m->topsize = newtopsize;
3899 newp = oldp; 3896 newp = oldp;
3900 } 3897 }
3901 } else { 3898 } else {
3902 USAGE_ERROR_ACTION (m, oldmem); 3899 USAGE_ERROR_ACTION(m, oldmem);
3903 POSTACTION (m); 3900 POSTACTION(m);
3904 return 0; 3901 return 0;
3905 } 3902 }
3906 3903
3907 POSTACTION (m); 3904 POSTACTION(m);
3908 3905
3909 if (newp != 0) { 3906 if (newp != 0) {
3910 if (extra != 0) { 3907 if (extra != 0) {
3911 internal_free (m, extra); 3908 internal_free(m, extra);
3912 } 3909 }
3913 check_inuse_chunk (m, newp); 3910 check_inuse_chunk(m, newp);
3914 return chunk2mem (newp); 3911 return chunk2mem(newp);
3915 } else { 3912 } else {
3916 void *newmem = internal_malloc (m, bytes); 3913 void *newmem = internal_malloc(m, bytes);
3917 if (newmem != 0) { 3914 if (newmem != 0) {
3918 size_t oc = oldsize - overhead_for (oldp); 3915 size_t oc = oldsize - overhead_for(oldp);
3919 memcpy (newmem, oldmem, (oc < bytes) ? oc : bytes); 3916 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3920 internal_free (m, oldmem); 3917 internal_free(m, oldmem);
3921 } 3918 }
3922 return newmem; 3919 return newmem;
3923 } 3920 }
3924 } 3921 }
3925 return 0; 3922 return 0;
3926 } 3923 }
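/* internal_realloc's shrink-in-place rule above, standalone: a shrink
   only splits off (and frees) a trailing remainder when the leftover is
   big enough to stand alone as a chunk. MIN_CHUNK_SIZE is an assumed
   stand-in value here. */
#include <stdio.h>

#define MIN_CHUNK ((size_t) 32)  /* assumed MIN_CHUNK_SIZE */

static int splits_remainder(size_t oldsize, size_t nb)
{
    return oldsize >= nb && (oldsize - nb) >= MIN_CHUNK;
}

int main(void)
{
    printf("%d\n", splits_remainder(128, 64));  /* 1: 64-byte tail freed */
    printf("%d\n", splits_remainder(128, 112)); /* 0: tail kept in place */
    return 0;
}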
3927 3924
3928 /* --------------------------- memalign support -------------------------- */ 3925 /* --------------------------- memalign support -------------------------- */
3929 3926
3930 static void * 3927 static void *
3931 internal_memalign (mstate m, size_t alignment, size_t bytes) 3928 internal_memalign(mstate m, size_t alignment, size_t bytes)
3932 { 3929 {
3933 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ 3930 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
3934 return internal_malloc (m, bytes); 3931 return internal_malloc(m, bytes);
3935 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ 3932 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3936 alignment = MIN_CHUNK_SIZE; 3933 alignment = MIN_CHUNK_SIZE;
3937 if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */ 3934 if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
3938 size_t a = MALLOC_ALIGNMENT << 1; 3935 size_t a = MALLOC_ALIGNMENT << 1;
3939 while (a < alignment) 3936 while (a < alignment)
3944 if (bytes >= MAX_REQUEST - alignment) { 3941 if (bytes >= MAX_REQUEST - alignment) {
3945 if (m != 0) { /* Test isn't needed but avoids compiler warning */ 3942 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3946 MALLOC_FAILURE_ACTION; 3943 MALLOC_FAILURE_ACTION;
3947 } 3944 }
3948 } else { 3945 } else {
3949 size_t nb = request2size (bytes); 3946 size_t nb = request2size(bytes);
3950 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; 3947 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3951 char *mem = (char *) internal_malloc (m, req); 3948 char *mem = (char *) internal_malloc(m, req);
3952 if (mem != 0) { 3949 if (mem != 0) {
3953 void *leader = 0; 3950 void *leader = 0;
3954 void *trailer = 0; 3951 void *trailer = 0;
3955 mchunkptr p = mem2chunk (mem); 3952 mchunkptr p = mem2chunk(mem);
3956 3953
3957 if (PREACTION (m)) 3954 if (PREACTION(m))
3958 return 0; 3955 return 0;
3959 if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */ 3956 if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
3960 /* 3957 /*
3961 Find an aligned spot inside chunk. Since we need to give 3958 Find an aligned spot inside chunk. Since we need to give
3962 back leading space in a chunk of at least MIN_CHUNK_SIZE, if 3959 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3964 MIN_CHUNK_SIZE leader, we can move to the next aligned spot. 3961 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3965 We've allocated enough total room so that this is always 3962 We've allocated enough total room so that this is always
3966 possible. 3963 possible.
3967 */ 3964 */
3968 char *br = (char *) 3965 char *br = (char *)
3969 mem2chunk ((size_t) 3966 mem2chunk((size_t)
3970 (((size_t) 3967 (((size_t)
3971 (mem + alignment - 3968 (mem + alignment -
3972 SIZE_T_ONE)) & -alignment)); 3969 SIZE_T_ONE)) & -alignment));
3973 char *pos = 3970 char *pos =
3974 ((size_t) (br - (char *) (p)) >= 3971 ((size_t) (br - (char *) (p)) >=
3975 MIN_CHUNK_SIZE) ? br : br + alignment; 3972 MIN_CHUNK_SIZE) ? br : br + alignment;
3976 mchunkptr newp = (mchunkptr) pos; 3973 mchunkptr newp = (mchunkptr) pos;
3977 size_t leadsize = pos - (char *) (p); 3974 size_t leadsize = pos - (char *) (p);
3978 size_t newsize = chunksize (p) - leadsize; 3975 size_t newsize = chunksize(p) - leadsize;
3979 3976
3980 if (is_mmapped (p)) { /* For mmapped chunks, just adjust offset */ 3977 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3981 newp->prev_foot = p->prev_foot + leadsize; 3978 newp->prev_foot = p->prev_foot + leadsize;
3982 newp->head = (newsize | CINUSE_BIT); 3979 newp->head = (newsize | CINUSE_BIT);
3983 } else { /* Otherwise, give back leader, use the rest */ 3980 } else { /* Otherwise, give back leader, use the rest */
3984 set_inuse (m, newp, newsize); 3981 set_inuse(m, newp, newsize);
3985 set_inuse (m, p, leadsize); 3982 set_inuse(m, p, leadsize);
3986 leader = chunk2mem (p); 3983 leader = chunk2mem(p);
3987 } 3984 }
3988 p = newp; 3985 p = newp;
3989 } 3986 }
3990 3987
3991 /* Give back spare room at the end */ 3988 /* Give back spare room at the end */
3992 if (!is_mmapped (p)) { 3989 if (!is_mmapped(p)) {
3993 size_t size = chunksize (p); 3990 size_t size = chunksize(p);
3994 if (size > nb + MIN_CHUNK_SIZE) { 3991 if (size > nb + MIN_CHUNK_SIZE) {
3995 size_t remainder_size = size - nb; 3992 size_t remainder_size = size - nb;
3996 mchunkptr remainder = chunk_plus_offset (p, nb); 3993 mchunkptr remainder = chunk_plus_offset(p, nb);
3997 set_inuse (m, p, nb); 3994 set_inuse(m, p, nb);
3998 set_inuse (m, remainder, remainder_size); 3995 set_inuse(m, remainder, remainder_size);
3999 trailer = chunk2mem (remainder); 3996 trailer = chunk2mem(remainder);
4000 } 3997 }
4001 } 3998 }
4002 3999
4003 assert (chunksize (p) >= nb); 4000 assert(chunksize(p) >= nb);
4004 assert ((((size_t) (chunk2mem (p))) % alignment) == 0); 4001 assert((((size_t) (chunk2mem(p))) % alignment) == 0);
4005 check_inuse_chunk (m, p); 4002 check_inuse_chunk(m, p);
4006 POSTACTION (m); 4003 POSTACTION(m);
4007 if (leader != 0) { 4004 if (leader != 0) {
4008 internal_free (m, leader); 4005 internal_free(m, leader);
4009 } 4006 }
4010 if (trailer != 0) { 4007 if (trailer != 0) {
4011 internal_free (m, trailer); 4008 internal_free(m, trailer);
4012 } 4009 }
4013 return chunk2mem (p); 4010 return chunk2mem(p);
4014 } 4011 }
4015 } 4012 }
4016 return 0; 4013 return 0;
4017 } 4014 }
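/* The power-of-two align-up used by internal_memalign above,
   standalone: for a power-of-two alignment, ~(alignment - 1) is the
   same mask as the -alignment that appears in the code. Illustration
   only. */
#include <assert.h>
#include <stdint.h>

static uintptr_t align_up(uintptr_t addr, uintptr_t alignment)
{
    return (addr + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
    assert(align_up(0x1001, 0x100) == 0x1100);
    assert(align_up(0x1100, 0x100) == 0x1100);  /* already aligned */
    return 0;
}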
4018 4015
4019 /* ------------------------ comalloc/coalloc support --------------------- */ 4016 /* ------------------------ comalloc/coalloc support --------------------- */
4020 4017
4021 static void ** 4018 static void **
4022 ialloc (mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[]) 4019 ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4023 { 4020 {
4024 /* 4021 /*
4025 This provides common support for independent_X routines, handling 4022 This provides common support for independent_X routines, handling
4026 all of the combinations that can result. 4023 all of the combinations that can result.
4027 4024
4049 marray = chunks; 4046 marray = chunks;
4050 array_size = 0; 4047 array_size = 0;
4051 } else { 4048 } else {
4052 /* if empty req, must still return chunk representing empty array */ 4049 /* if empty req, must still return chunk representing empty array */
4053 if (n_elements == 0) 4050 if (n_elements == 0)
4054 return (void **) internal_malloc (m, 0); 4051 return (void **) internal_malloc(m, 0);
4055 marray = 0; 4052 marray = 0;
4056 array_size = request2size (n_elements * (sizeof (void *))); 4053 array_size = request2size(n_elements * (sizeof(void *)));
4057 } 4054 }
4058 4055
4059 /* compute total element size */ 4056 /* compute total element size */
4060 if (opts & 0x1) { /* all-same-size */ 4057 if (opts & 0x1) { /* all-same-size */
4061 element_size = request2size (*sizes); 4058 element_size = request2size(*sizes);
4062 contents_size = n_elements * element_size; 4059 contents_size = n_elements * element_size;
4063 } else { /* add up all the sizes */ 4060 } else { /* add up all the sizes */
4064 element_size = 0; 4061 element_size = 0;
4065 contents_size = 0; 4062 contents_size = 0;
4066 for (i = 0; i != n_elements; ++i) 4063 for (i = 0; i != n_elements; ++i)
4067 contents_size += request2size (sizes[i]); 4064 contents_size += request2size(sizes[i]);
4068 } 4065 }
4069 4066
4070 size = contents_size + array_size; 4067 size = contents_size + array_size;
4071 4068
4072 /* 4069 /*
4073 Allocate the aggregate chunk. First disable direct-mmapping so 4070 Allocate the aggregate chunk. First disable direct-mmapping so
4074 malloc won't use it, since we would not be able to later 4071 malloc won't use it, since we would not be able to later
4075 free/realloc space internal to a segregated mmap region. 4072 free/realloc space internal to a segregated mmap region.
4076 */ 4073 */
4077 was_enabled = use_mmap (m); 4074 was_enabled = use_mmap(m);
4078 disable_mmap (m); 4075 disable_mmap(m);
4079 mem = internal_malloc (m, size - CHUNK_OVERHEAD); 4076 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4080 if (was_enabled) 4077 if (was_enabled)
4081 enable_mmap (m); 4078 enable_mmap(m);
4082 if (mem == 0) 4079 if (mem == 0)
4083 return 0; 4080 return 0;
4084 4081
4085 if (PREACTION (m)) 4082 if (PREACTION(m))
4086 return 0; 4083 return 0;
4087 p = mem2chunk (mem); 4084 p = mem2chunk(mem);
4088 remainder_size = chunksize (p); 4085 remainder_size = chunksize(p);
4089 4086
4090 assert (!is_mmapped (p)); 4087 assert(!is_mmapped(p));
4091 4088
4092 if (opts & 0x2) { /* optionally clear the elements */ 4089 if (opts & 0x2) { /* optionally clear the elements */
4093 memset ((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size); 4090 memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4094 } 4091 }
4095 4092
4096 /* If not provided, allocate the pointer array as final part of chunk */ 4093 /* If not provided, allocate the pointer array as final part of chunk */
4097 if (marray == 0) { 4094 if (marray == 0) {
4098 size_t array_chunk_size; 4095 size_t array_chunk_size;
4099 array_chunk = chunk_plus_offset (p, contents_size); 4096 array_chunk = chunk_plus_offset(p, contents_size);
4100 array_chunk_size = remainder_size - contents_size; 4097 array_chunk_size = remainder_size - contents_size;
4101 marray = (void **) (chunk2mem (array_chunk)); 4098 marray = (void **) (chunk2mem(array_chunk));
4102 set_size_and_pinuse_of_inuse_chunk (m, array_chunk, array_chunk_size); 4099 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4103 remainder_size = contents_size; 4100 remainder_size = contents_size;
4104 } 4101 }
4105 4102
4106 /* split out elements */ 4103 /* split out elements */
4107 for (i = 0;; ++i) { 4104 for (i = 0;; ++i) {
4108 marray[i] = chunk2mem (p); 4105 marray[i] = chunk2mem(p);
4109 if (i != n_elements - 1) { 4106 if (i != n_elements - 1) {
4110 if (element_size != 0) 4107 if (element_size != 0)
4111 size = element_size; 4108 size = element_size;
4112 else 4109 else
4113 size = request2size (sizes[i]); 4110 size = request2size(sizes[i]);
4114 remainder_size -= size; 4111 remainder_size -= size;
4115 set_size_and_pinuse_of_inuse_chunk (m, p, size); 4112 set_size_and_pinuse_of_inuse_chunk(m, p, size);
4116 p = chunk_plus_offset (p, size); 4113 p = chunk_plus_offset(p, size);
4117 } else { /* the final element absorbs any overallocation slop */ 4114 } else { /* the final element absorbs any overallocation slop */
4118 set_size_and_pinuse_of_inuse_chunk (m, p, remainder_size); 4115 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4119 break; 4116 break;
4120 } 4117 }
4121 } 4118 }
4122 4119
4123 #if DEBUG 4120 #if DEBUG
4124 if (marray != chunks) { 4121 if (marray != chunks) {
4125 /* final element must have exactly exhausted chunk */ 4122 /* final element must have exactly exhausted chunk */
4126 if (element_size != 0) { 4123 if (element_size != 0) {
4127 assert (remainder_size == element_size); 4124 assert(remainder_size == element_size);
4128 } else { 4125 } else {
4129 assert (remainder_size == request2size (sizes[i])); 4126 assert(remainder_size == request2size(sizes[i]));
4130 } 4127 }
4131 check_inuse_chunk (m, mem2chunk (marray)); 4128 check_inuse_chunk(m, mem2chunk(marray));
4132 } 4129 }
4133 for (i = 0; i != n_elements; ++i) 4130 for (i = 0; i != n_elements; ++i)
4134 check_inuse_chunk (m, mem2chunk (marray[i])); 4131 check_inuse_chunk(m, mem2chunk(marray[i]));
4135 4132
4136 #endif /* DEBUG */ 4133 #endif /* DEBUG */
4137 4134
4138 POSTACTION (m); 4135 POSTACTION(m);
4139 return marray; 4136 return marray;
4140 } 4137 }
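/* The carve-one-block-into-many pattern that ialloc implements, reduced
   to plain malloc plus pointer arithmetic; illustration only. Unlike
   ialloc, the pieces here carry no chunk headers, so only the whole
   block can be freed. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t sizes[] = { 16, 40, 8 };
    size_t n = sizeof(sizes) / sizeof(sizes[0]);
    size_t total = 0, i;
    void *elems[3];
    char *block, *cursor;

    for (i = 0; i < n; ++i)
        total += sizes[i];                 /* "add up all the sizes" case */
    if ((block = (char *) malloc(total)) == 0)
        return 1;
    memset(block, 0, total);               /* the opts & 0x2 clearing step */
    for (cursor = block, i = 0; i < n; ++i) {   /* split out elements */
        elems[i] = cursor;
        cursor += sizes[i];
    }
    printf("%lu elements carved from one %lu-byte block at %p\n",
           (unsigned long) n, (unsigned long) total, elems[0]);
    free(block);                            /* one free releases everything */
    return 0;
}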
4141 4138
4142 4139
4143 /* -------------------------- public routines ---------------------------- */ 4140 /* -------------------------- public routines ---------------------------- */
4144 4141
4145 #if !ONLY_MSPACES 4142 #if !ONLY_MSPACES
4146 4143
4147 void * 4144 void *
4148 dlmalloc (size_t bytes) 4145 dlmalloc(size_t bytes)
4149 { 4146 {
4150 /* 4147 /*
4151 Basic algorithm: 4148 Basic algorithm:
4152 If a small request (< 256 bytes minus per-chunk overhead): 4149 If a small request (< 256 bytes minus per-chunk overhead):
4153 1. If one exists, use a remainderless chunk in associated smallbin. 4150 1. If one exists, use a remainderless chunk in associated smallbin.
4168 5. If available, get memory from system and use it 4165 5. If available, get memory from system and use it
4169 4166
4170 The ugly goto's here ensure that postaction occurs along all paths. 4167 The ugly goto's here ensure that postaction occurs along all paths.
4171 */ 4168 */
4172 4169
4173 if (!PREACTION (gm)) { 4170 if (!PREACTION(gm)) {
4174 void *mem; 4171 void *mem;
4175 size_t nb; 4172 size_t nb;
4176 if (bytes <= MAX_SMALL_REQUEST) { 4173 if (bytes <= MAX_SMALL_REQUEST) {
4177 bindex_t idx; 4174 bindex_t idx;
4178 binmap_t smallbits; 4175 binmap_t smallbits;
4179 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes); 4176 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4180 idx = small_index (nb); 4177 idx = small_index(nb);
4181 smallbits = gm->smallmap >> idx; 4178 smallbits = gm->smallmap >> idx;
4182 4179
4183 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ 4180 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4184 mchunkptr b, p; 4181 mchunkptr b, p;
4185 idx += ~smallbits & 1; /* Uses next bin if idx empty */ 4182 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4186 b = smallbin_at (gm, idx); 4183 b = smallbin_at(gm, idx);
4187 p = b->fd; 4184 p = b->fd;
4188 assert (chunksize (p) == small_index2size (idx)); 4185 assert(chunksize(p) == small_index2size(idx));
4189 unlink_first_small_chunk (gm, b, p, idx); 4186 unlink_first_small_chunk(gm, b, p, idx);
4190 set_inuse_and_pinuse (gm, p, small_index2size (idx)); 4187 set_inuse_and_pinuse(gm, p, small_index2size(idx));
4191 mem = chunk2mem (p); 4188 mem = chunk2mem(p);
4192 check_malloced_chunk (gm, mem, nb); 4189 check_malloced_chunk(gm, mem, nb);
4193 goto postaction; 4190 goto postaction;
4194 } 4191 }
4195 4192
4196 else if (nb > gm->dvsize) { 4193 else if (nb > gm->dvsize) {
4197 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ 4194 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4198 mchunkptr b, p, r; 4195 mchunkptr b, p, r;
4199 size_t rsize; 4196 size_t rsize;
4200 bindex_t i; 4197 bindex_t i;
4201 binmap_t leftbits = 4198 binmap_t leftbits =
4202 (smallbits << idx) & left_bits (idx2bit (idx)); 4199 (smallbits << idx) & left_bits(idx2bit(idx));
4203 binmap_t leastbit = least_bit (leftbits); 4200 binmap_t leastbit = least_bit(leftbits);
4204 compute_bit2idx (leastbit, i); 4201 compute_bit2idx(leastbit, i);
4205 b = smallbin_at (gm, i); 4202 b = smallbin_at(gm, i);
4206 p = b->fd; 4203 p = b->fd;
4207 assert (chunksize (p) == small_index2size (i)); 4204 assert(chunksize(p) == small_index2size(i));
4208 unlink_first_small_chunk (gm, b, p, i); 4205 unlink_first_small_chunk(gm, b, p, i);
4209 rsize = small_index2size (i) - nb; 4206 rsize = small_index2size(i) - nb;
4210 /* Fit here cannot be remainderless with 4-byte sizes */ 4207 /* Fit here cannot be remainderless with 4-byte sizes */
4211 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) 4208 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4212 set_inuse_and_pinuse (gm, p, small_index2size (i)); 4209 set_inuse_and_pinuse(gm, p, small_index2size(i));
4213 else { 4210 else {
4214 set_size_and_pinuse_of_inuse_chunk (gm, p, nb); 4211 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4215 r = chunk_plus_offset (p, nb); 4212 r = chunk_plus_offset(p, nb);
4216 set_size_and_pinuse_of_free_chunk (r, rsize); 4213 set_size_and_pinuse_of_free_chunk(r, rsize);
4217 replace_dv (gm, r, rsize); 4214 replace_dv(gm, r, rsize);
4218 } 4215 }
4219 mem = chunk2mem (p); 4216 mem = chunk2mem(p);
4220 check_malloced_chunk (gm, mem, nb); 4217 check_malloced_chunk(gm, mem, nb);
4221 goto postaction; 4218 goto postaction;
4222 } 4219 }
4223 4220
4224 else if (gm->treemap != 0 4221 else if (gm->treemap != 0
4225 && (mem = tmalloc_small (gm, nb)) != 0) { 4222 && (mem = tmalloc_small(gm, nb)) != 0) {
4226 check_malloced_chunk (gm, mem, nb); 4223 check_malloced_chunk(gm, mem, nb);
4227 goto postaction; 4224 goto postaction;
4228 } 4225 }
4229 } 4226 }
4230 } else if (bytes >= MAX_REQUEST) 4227 } else if (bytes >= MAX_REQUEST)
4231 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ 4228 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4232 else { 4229 else {
4233 nb = pad_request (bytes); 4230 nb = pad_request(bytes);
4234 if (gm->treemap != 0 && (mem = tmalloc_large (gm, nb)) != 0) { 4231 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4235 check_malloced_chunk (gm, mem, nb); 4232 check_malloced_chunk(gm, mem, nb);
4236 goto postaction; 4233 goto postaction;
4237 } 4234 }
4238 } 4235 }
4239 4236
4240 if (nb <= gm->dvsize) { 4237 if (nb <= gm->dvsize) {
4241 size_t rsize = gm->dvsize - nb; 4238 size_t rsize = gm->dvsize - nb;
4242 mchunkptr p = gm->dv; 4239 mchunkptr p = gm->dv;
4243 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ 4240 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4244 mchunkptr r = gm->dv = chunk_plus_offset (p, nb); 4241 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4245 gm->dvsize = rsize; 4242 gm->dvsize = rsize;
4246 set_size_and_pinuse_of_free_chunk (r, rsize); 4243 set_size_and_pinuse_of_free_chunk(r, rsize);
4247 set_size_and_pinuse_of_inuse_chunk (gm, p, nb); 4244 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4248 } else { /* exhaust dv */ 4245 } else { /* exhaust dv */
4249 size_t dvs = gm->dvsize; 4246 size_t dvs = gm->dvsize;
4250 gm->dvsize = 0; 4247 gm->dvsize = 0;
4251 gm->dv = 0; 4248 gm->dv = 0;
4252 set_inuse_and_pinuse (gm, p, dvs); 4249 set_inuse_and_pinuse(gm, p, dvs);
4253 } 4250 }
4254 mem = chunk2mem (p); 4251 mem = chunk2mem(p);
4255 check_malloced_chunk (gm, mem, nb); 4252 check_malloced_chunk(gm, mem, nb);
4256 goto postaction; 4253 goto postaction;
4257 } 4254 }
4258 4255
4259 else if (nb < gm->topsize) { /* Split top */ 4256 else if (nb < gm->topsize) { /* Split top */
4260 size_t rsize = gm->topsize -= nb; 4257 size_t rsize = gm->topsize -= nb;
4261 mchunkptr p = gm->top; 4258 mchunkptr p = gm->top;
4262 mchunkptr r = gm->top = chunk_plus_offset (p, nb); 4259 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4263 r->head = rsize | PINUSE_BIT; 4260 r->head = rsize | PINUSE_BIT;
4264 set_size_and_pinuse_of_inuse_chunk (gm, p, nb); 4261 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4265 mem = chunk2mem (p); 4262 mem = chunk2mem(p);
4266 check_top_chunk (gm, gm->top); 4263 check_top_chunk(gm, gm->top);
4267 check_malloced_chunk (gm, mem, nb); 4264 check_malloced_chunk(gm, mem, nb);
4268 goto postaction; 4265 goto postaction;
4269 } 4266 }
4270 4267
4271 mem = sys_alloc (gm, nb); 4268 mem = sys_alloc(gm, nb);
4272 4269
4273 postaction: 4270 postaction:
4274 POSTACTION (gm); 4271 POSTACTION(gm);
4275 return mem; 4272 return mem;
4276 } 4273 }
4277 4274
4278 return 0; 4275 return 0;
4279 } 4276 }
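/*
   A minimal sketch of the smallmap bit trick used in dlmalloc above,
   with a plain unsigned standing in for binmap_t (illustrative only,
   not part of the original source).  After "smallbits = smallmap >>
   idx", bit 0 says whether bin idx is nonempty and bit 1 whether bin
   idx+1 is, so "(smallbits & 0x3U) != 0" detects a remainderless fit
   and "idx += ~smallbits & 1" advances to the next bin exactly when
   bin idx itself is empty.
*/
#include <assert.h>

static unsigned
pick_small_bin(unsigned smallmap, unsigned idx)
{
    unsigned smallbits = smallmap >> idx;
    assert((smallbits & 0x3U) != 0); /* caller established a fit */
    return idx + (~smallbits & 1);   /* use next bin if idx empty */
}

/* e.g. pick_small_bin(0x08, 3) == 3; pick_small_bin(0x10, 3) == 4 */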
4280 4277
4281 void 4278 void
4282 dlfree (void *mem) 4279 dlfree(void *mem)
4283 { 4280 {
4284 /* 4281 /*
4285 Consolidate freed chunks with preceding or succeeding bordering 4282 Consolidate freed chunks with preceding or succeeding bordering
4286 free chunks, if they exist, and then place in a bin. Intermixed 4283 free chunks, if they exist, and then place in a bin. Intermixed
4287 with special cases for top, dv, mmapped chunks, and usage errors. 4284 with special cases for top, dv, mmapped chunks, and usage errors.
4288 */ 4285 */
4289 4286
4290 if (mem != 0) { 4287 if (mem != 0) {
4291 mchunkptr p = mem2chunk (mem); 4288 mchunkptr p = mem2chunk(mem);
4292 #if FOOTERS 4289 #if FOOTERS
4293 mstate fm = get_mstate_for (p); 4290 mstate fm = get_mstate_for(p);
4294 if (!ok_magic (fm)) { 4291 if (!ok_magic(fm)) {
4295 USAGE_ERROR_ACTION (fm, p); 4292 USAGE_ERROR_ACTION(fm, p);
4296 return; 4293 return;
4297 } 4294 }
4298 #else /* FOOTERS */ 4295 #else /* FOOTERS */
4299 #define fm gm 4296 #define fm gm
4300 #endif /* FOOTERS */ 4297 #endif /* FOOTERS */
4301 if (!PREACTION (fm)) { 4298 if (!PREACTION(fm)) {
4302 check_inuse_chunk (fm, p); 4299 check_inuse_chunk(fm, p);
4303 if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) { 4300 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4304 size_t psize = chunksize (p); 4301 size_t psize = chunksize(p);
4305 mchunkptr next = chunk_plus_offset (p, psize); 4302 mchunkptr next = chunk_plus_offset(p, psize);
4306 if (!pinuse (p)) { 4303 if (!pinuse(p)) {
4307 size_t prevsize = p->prev_foot; 4304 size_t prevsize = p->prev_foot;
4308 if ((prevsize & IS_MMAPPED_BIT) != 0) { 4305 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4309 prevsize &= ~IS_MMAPPED_BIT; 4306 prevsize &= ~IS_MMAPPED_BIT;
4310 psize += prevsize + MMAP_FOOT_PAD; 4307 psize += prevsize + MMAP_FOOT_PAD;
4311 if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0) 4308 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4312 fm->footprint -= psize; 4309 fm->footprint -= psize;
4313 goto postaction; 4310 goto postaction;
4314 } else { 4311 } else {
4315 mchunkptr prev = chunk_minus_offset (p, prevsize); 4312 mchunkptr prev = chunk_minus_offset(p, prevsize);
4316 psize += prevsize; 4313 psize += prevsize;
4317 p = prev; 4314 p = prev;
4318 if (RTCHECK (ok_address (fm, prev))) { /* consolidate backward */ 4315 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4319 if (p != fm->dv) { 4316 if (p != fm->dv) {
4320 unlink_chunk (fm, p, prevsize); 4317 unlink_chunk(fm, p, prevsize);
4321 } else if ((next->head & INUSE_BITS) == 4318 } else if ((next->head & INUSE_BITS) ==
4322 INUSE_BITS) { 4319 INUSE_BITS) {
4323 fm->dvsize = psize; 4320 fm->dvsize = psize;
4324 set_free_with_pinuse (p, psize, next); 4321 set_free_with_pinuse(p, psize, next);
4325 goto postaction; 4322 goto postaction;
4326 } 4323 }
4327 } else 4324 } else
4328 goto erroraction; 4325 goto erroraction;
4329 } 4326 }
4330 } 4327 }
4331 4328
4332 if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) { 4329 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4333 if (!cinuse (next)) { /* consolidate forward */ 4330 if (!cinuse(next)) { /* consolidate forward */
4334 if (next == fm->top) { 4331 if (next == fm->top) {
4335 size_t tsize = fm->topsize += psize; 4332 size_t tsize = fm->topsize += psize;
4336 fm->top = p; 4333 fm->top = p;
4337 p->head = tsize | PINUSE_BIT; 4334 p->head = tsize | PINUSE_BIT;
4338 if (p == fm->dv) { 4335 if (p == fm->dv) {
4339 fm->dv = 0; 4336 fm->dv = 0;
4340 fm->dvsize = 0; 4337 fm->dvsize = 0;
4341 } 4338 }
4342 if (should_trim (fm, tsize)) 4339 if (should_trim(fm, tsize))
4343 sys_trim (fm, 0); 4340 sys_trim(fm, 0);
4344 goto postaction; 4341 goto postaction;
4345 } else if (next == fm->dv) { 4342 } else if (next == fm->dv) {
4346 size_t dsize = fm->dvsize += psize; 4343 size_t dsize = fm->dvsize += psize;
4347 fm->dv = p; 4344 fm->dv = p;
4348 set_size_and_pinuse_of_free_chunk (p, dsize); 4345 set_size_and_pinuse_of_free_chunk(p, dsize);
4349 goto postaction; 4346 goto postaction;
4350 } else { 4347 } else {
4351 size_t nsize = chunksize (next); 4348 size_t nsize = chunksize(next);
4352 psize += nsize; 4349 psize += nsize;
4353 unlink_chunk (fm, next, nsize); 4350 unlink_chunk(fm, next, nsize);
4354 set_size_and_pinuse_of_free_chunk (p, psize); 4351 set_size_and_pinuse_of_free_chunk(p, psize);
4355 if (p == fm->dv) { 4352 if (p == fm->dv) {
4356 fm->dvsize = psize; 4353 fm->dvsize = psize;
4357 goto postaction; 4354 goto postaction;
4358 } 4355 }
4359 } 4356 }
4360 } else 4357 } else
4361 set_free_with_pinuse (p, psize, next); 4358 set_free_with_pinuse(p, psize, next);
4362 insert_chunk (fm, p, psize); 4359 insert_chunk(fm, p, psize);
4363 check_free_chunk (fm, p); 4360 check_free_chunk(fm, p);
4364 goto postaction; 4361 goto postaction;
4365 } 4362 }
4366 } 4363 }
4367 erroraction: 4364 erroraction:
4368 USAGE_ERROR_ACTION (fm, p); 4365 USAGE_ERROR_ACTION(fm, p);
4369 postaction: 4366 postaction:
4370 POSTACTION (fm); 4367 POSTACTION(fm);
4371 } 4368 }
4372 } 4369 }
4373 #if !FOOTERS 4370 #if !FOOTERS
4374 #undef fm 4371 #undef fm
4375 #endif /* FOOTERS */ 4372 #endif /* FOOTERS */
4376 } 4373 }
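/*
   The tag-bit idiom from the mmapped branch of dlfree above, in
   isolation (TOY_MMAPPED_BIT is a stand-in constant, not the file's
   IS_MMAPPED_BIT).  Chunk sizes are multiples of the alignment, so
   their low bits are free to carry flags: test with &, then strip
   with &~ before using the value as a size.
*/
#include <assert.h>
#include <stddef.h>

#define TOY_MMAPPED_BIT ((size_t) 1)

static void
tag_bit_demo(void)
{
    size_t prevsize = 4096 | TOY_MMAPPED_BIT; /* size plus flag */
    if ((prevsize & TOY_MMAPPED_BIT) != 0) {  /* flag present? */
        prevsize &= ~TOY_MMAPPED_BIT;         /* recover raw size */
        assert(prevsize == 4096);
    }
}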
4377 4374
4378 void * 4375 void *
4379 dlcalloc (size_t n_elements, size_t elem_size) 4376 dlcalloc(size_t n_elements, size_t elem_size)
4380 { 4377 {
4381 void *mem; 4378 void *mem;
4382 size_t req = 0; 4379 size_t req = 0;
4383 if (n_elements != 0) { 4380 if (n_elements != 0) {
4384 req = n_elements * elem_size; 4381 req = n_elements * elem_size;
4385 if (((n_elements | elem_size) & ~(size_t) 0xffff) && 4382 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4386 (req / n_elements != elem_size)) 4383 (req / n_elements != elem_size))
4387 req = MAX_SIZE_T; /* force downstream failure on overflow */ 4384 req = MAX_SIZE_T; /* force downstream failure on overflow */
4388 } 4385 }
4389 mem = dlmalloc (req); 4386 mem = dlmalloc(req);
4390 if (mem != 0 && calloc_must_clear (mem2chunk (mem))) 4387 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4391 memset (mem, 0, req); 4388 memset(mem, 0, req);
4392 return mem; 4389 return mem;
4393 } 4390 }
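/*
   The dlcalloc overflow guard above, shown on its own (a sketch; the
   0xffff fast path assumes size_t is at least 32 bits, as the code
   above does).  When both operands fit in 16 bits their product
   cannot overflow, so the costly division runs only for large inputs.
*/
#include <stddef.h>

static size_t
checked_mul(size_t n, size_t m)
{
    size_t req = n * m;
    if (n != 0 && ((n | m) & ~(size_t) 0xffff) && req / n != m)
        req = (size_t) -1; /* force downstream allocation failure */
    return req;
}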
4394 4391
4395 void * 4392 void *
4396 dlrealloc (void *oldmem, size_t bytes) 4393 dlrealloc(void *oldmem, size_t bytes)
4397 { 4394 {
4398 if (oldmem == 0) 4395 if (oldmem == 0)
4399 return dlmalloc (bytes); 4396 return dlmalloc(bytes);
4400 #ifdef REALLOC_ZERO_BYTES_FREES 4397 #ifdef REALLOC_ZERO_BYTES_FREES
4401 if (bytes == 0) { 4398 if (bytes == 0) {
4402 dlfree (oldmem); 4399 dlfree(oldmem);
4403 return 0; 4400 return 0;
4404 } 4401 }
4405 #endif /* REALLOC_ZERO_BYTES_FREES */ 4402 #endif /* REALLOC_ZERO_BYTES_FREES */
4406 else { 4403 else {
4407 #if ! FOOTERS 4404 #if ! FOOTERS
4408 mstate m = gm; 4405 mstate m = gm;
4409 #else /* FOOTERS */ 4406 #else /* FOOTERS */
4410 mstate m = get_mstate_for (mem2chunk (oldmem)); 4407 mstate m = get_mstate_for(mem2chunk(oldmem));
4411 if (!ok_magic (m)) { 4408 if (!ok_magic(m)) {
4412 USAGE_ERROR_ACTION (m, oldmem); 4409 USAGE_ERROR_ACTION(m, oldmem);
4413 return 0; 4410 return 0;
4414 } 4411 }
4415 #endif /* FOOTERS */ 4412 #endif /* FOOTERS */
4416 return internal_realloc (m, oldmem, bytes); 4413 return internal_realloc(m, oldmem, bytes);
4417 } 4414 }
4418 } 4415 }
4419 4416
4420 void * 4417 void *
4421 dlmemalign (size_t alignment, size_t bytes) 4418 dlmemalign(size_t alignment, size_t bytes)
4422 { 4419 {
4423 return internal_memalign (gm, alignment, bytes); 4420 return internal_memalign(gm, alignment, bytes);
4424 } 4421 }
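/*
   Usage sketch for dlmemalign (error handling elided).  The alignment
   must be a power of two; the returned pointer then has the requested
   alignment, checkable with a mask.
*/
#include <assert.h>
#include <stdint.h>

static void
memalign_demo(void)
{
    void *p = dlmemalign(64, 1000); /* 64-byte-aligned request */
    if (p != 0) {
        assert(((uintptr_t) p & 63) == 0);
        dlfree(p);
    }
}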
4425 4422
4426 void ** 4423 void **
4427 dlindependent_calloc (size_t n_elements, size_t elem_size, void *chunks[]) 4424 dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
4428 { 4425 {
4429 size_t sz = elem_size; /* serves as 1-element array */ 4426 size_t sz = elem_size; /* serves as 1-element array */
4430 return ialloc (gm, n_elements, &sz, 3, chunks); 4427 return ialloc(gm, n_elements, &sz, 3, chunks);
4431 } 4428 }
4432 4429
4433 void ** 4430 void **
4434 dlindependent_comalloc (size_t n_elements, size_t sizes[], void *chunks[]) 4431 dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
4435 { 4432 {
4436 return ialloc (gm, n_elements, sizes, 0, chunks); 4433 return ialloc(gm, n_elements, sizes, 0, chunks);
4437 } 4434 }
4438 4435
4439 void * 4436 void *
4440 dlvalloc (size_t bytes) 4437 dlvalloc(size_t bytes)
4441 { 4438 {
4442 size_t pagesz; 4439 size_t pagesz;
4443 init_mparams (); 4440 init_mparams();
4444 pagesz = mparams.page_size; 4441 pagesz = mparams.page_size;
4445 return dlmemalign (pagesz, bytes); 4442 return dlmemalign(pagesz, bytes);
4446 } 4443 }
4447 4444
4448 void * 4445 void *
4449 dlpvalloc (size_t bytes) 4446 dlpvalloc(size_t bytes)
4450 { 4447 {
4451 size_t pagesz; 4448 size_t pagesz;
4452 init_mparams (); 4449 init_mparams();
4453 pagesz = mparams.page_size; 4450 pagesz = mparams.page_size;
4454 return dlmemalign (pagesz, 4451 return dlmemalign(pagesz,
4455 (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - 4452 (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4456 SIZE_T_ONE));
4457 } 4453 }
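/*
   The page-rounding expression dlpvalloc uses above, isolated (valid
   because page sizes are powers of two).  Adding pagesz-1 and masking
   off the low bits rounds up to the next page multiple: with
   4096-byte pages, 5000 -> 8192 and 4096 -> 4096.
*/
#include <stddef.h>

static size_t
round_up_to_page(size_t bytes, size_t pagesz)
{
    return (bytes + pagesz - 1) & ~(pagesz - 1);
}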
4458 4454
4459 int 4455 int
4460 dlmalloc_trim (size_t pad) 4456 dlmalloc_trim(size_t pad)
4461 { 4457 {
4462 int result = 0; 4458 int result = 0;
4463 if (!PREACTION (gm)) { 4459 if (!PREACTION(gm)) {
4464 result = sys_trim (gm, pad); 4460 result = sys_trim(gm, pad);
4465 POSTACTION (gm); 4461 POSTACTION(gm);
4466 } 4462 }
4467 return result; 4463 return result;
4468 } 4464 }
4469 4465
4470 size_t 4466 size_t
4471 dlmalloc_footprint (void) 4467 dlmalloc_footprint(void)
4472 { 4468 {
4473 return gm->footprint; 4469 return gm->footprint;
4474 } 4470 }
4475 4471
4476 size_t 4472 size_t
4477 dlmalloc_max_footprint (void) 4473 dlmalloc_max_footprint(void)
4478 { 4474 {
4479 return gm->max_footprint; 4475 return gm->max_footprint;
4480 } 4476 }
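/*
   A usage sketch tying dlmalloc_trim to the footprint counters above
   (effect is system-dependent: sys_trim can only return memory when
   the top of the heap holds enough contiguous free space).
*/
#include <stddef.h>

static void
trim_demo(void)
{
    size_t before, after;
    before = dlmalloc_footprint();
    (void) dlmalloc_trim(0);      /* pad 0: keep no extra slack */
    after = dlmalloc_footprint(); /* typically after <= before */
    (void) before;
    (void) after;
}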
4481 4477
4482 #if !NO_MALLINFO 4478 #if !NO_MALLINFO
4483 struct mallinfo 4479 struct mallinfo
4484 dlmallinfo (void) 4480 dlmallinfo(void)
4485 { 4481 {
4486 return internal_mallinfo (gm); 4482 return internal_mallinfo(gm);
4487 } 4483 }
4488 #endif /* NO_MALLINFO */ 4484 #endif /* NO_MALLINFO */
4489 4485
4490 void 4486 void
4491 dlmalloc_stats () 4487 dlmalloc_stats()
4492 { 4488 {
4493 internal_malloc_stats (gm); 4489 internal_malloc_stats(gm);
4494 } 4490 }
4495 4491
4496 size_t 4492 size_t
4497 dlmalloc_usable_size (void *mem) 4493 dlmalloc_usable_size(void *mem)
4498 { 4494 {
4499 if (mem != 0) { 4495 if (mem != 0) {
4500 mchunkptr p = mem2chunk (mem); 4496 mchunkptr p = mem2chunk(mem);
4501 if (cinuse (p)) 4497 if (cinuse(p))
4502 return chunksize (p) - overhead_for (p); 4498 return chunksize(p) - overhead_for(p);
4503 } 4499 }
4504 return 0; 4500 return 0;
4505 } 4501 }
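/*
   What dlmalloc_usable_size reports, as a sketch (the exact slack
   depends on padding and chunk granularity).  For a live chunk the
   usable size is at least the size that was requested, and the full
   amount may be written safely.
*/
#include <stddef.h>

static void
usable_size_demo(void)
{
    void *p = dlmalloc(100);
    if (p != 0) {
        size_t n = dlmalloc_usable_size(p); /* n >= 100 */
        (void) n;
        dlfree(p);
    }
}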
4506 4502
4507 int 4503 int
4508 dlmallopt (int param_number, int value) 4504 dlmallopt(int param_number, int value)
4509 { 4505 {
4510 return change_mparam (param_number, value); 4506 return change_mparam(param_number, value);
4511 } 4507 }
4512 4508
4513 #endif /* !ONLY_MSPACES */ 4509 #endif /* !ONLY_MSPACES */
4514 4510
4515 /* ----------------------------- user mspaces ---------------------------- */ 4511 /* ----------------------------- user mspaces ---------------------------- */
4516 4512
4517 #if MSPACES 4513 #if MSPACES
4518 4514
4519 static mstate 4515 static mstate
4520 init_user_mstate (char *tbase, size_t tsize) 4516 init_user_mstate(char *tbase, size_t tsize)
4521 { 4517 {
4522 size_t msize = pad_request (sizeof (struct malloc_state)); 4518 size_t msize = pad_request(sizeof(struct malloc_state));
4523 mchunkptr mn; 4519 mchunkptr mn;
4524 mchunkptr msp = align_as_chunk (tbase); 4520 mchunkptr msp = align_as_chunk(tbase);
4525 mstate m = (mstate) (chunk2mem (msp)); 4521 mstate m = (mstate) (chunk2mem(msp));
4526 memset (m, 0, msize); 4522 memset(m, 0, msize);
4527 INITIAL_LOCK (&m->mutex); 4523 INITIAL_LOCK(&m->mutex);
4528 msp->head = (msize | PINUSE_BIT | CINUSE_BIT); 4524 msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4529 m->seg.base = m->least_addr = tbase; 4525 m->seg.base = m->least_addr = tbase;
4530 m->seg.size = m->footprint = m->max_footprint = tsize; 4526 m->seg.size = m->footprint = m->max_footprint = tsize;
4531 m->magic = mparams.magic; 4527 m->magic = mparams.magic;
4532 m->mflags = mparams.default_mflags; 4528 m->mflags = mparams.default_mflags;
4533 disable_contiguous (m); 4529 disable_contiguous(m);
4534 init_bins (m); 4530 init_bins(m);
4535 mn = next_chunk (mem2chunk (m)); 4531 mn = next_chunk(mem2chunk(m));
4536 init_top (m, mn, 4532 init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4537 (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE); 4533 check_top_chunk(m, m->top);
4538 check_top_chunk (m, m->top);
4539 return m; 4534 return m;
4540 } 4535 }
4541 4536
4542 mspace 4537 mspace
4543 create_mspace (size_t capacity, int locked) 4538 create_mspace(size_t capacity, int locked)
4544 { 4539 {
4545 mstate m = 0; 4540 mstate m = 0;
4546 size_t msize = pad_request (sizeof (struct malloc_state)); 4541 size_t msize = pad_request(sizeof(struct malloc_state));
4547 init_mparams (); /* Ensure pagesize etc initialized */ 4542 init_mparams(); /* Ensure pagesize etc initialized */
4548 4543
4549 if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { 4544 if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4550 size_t rs = ((capacity == 0) ? mparams.granularity : 4545 size_t rs = ((capacity == 0) ? mparams.granularity :
4551 (capacity + TOP_FOOT_SIZE + msize)); 4546 (capacity + TOP_FOOT_SIZE + msize));
4552 size_t tsize = granularity_align (rs); 4547 size_t tsize = granularity_align(rs);
4553 char *tbase = (char *) (CALL_MMAP (tsize)); 4548 char *tbase = (char *) (CALL_MMAP(tsize));
4554 if (tbase != CMFAIL) { 4549 if (tbase != CMFAIL) {
4555 m = init_user_mstate (tbase, tsize); 4550 m = init_user_mstate(tbase, tsize);
4556 m->seg.sflags = IS_MMAPPED_BIT; 4551 m->seg.sflags = IS_MMAPPED_BIT;
4557 set_lock (m, locked); 4552 set_lock(m, locked);
4558 } 4553 }
4559 } 4554 }
4560 return (mspace) m; 4555 return (mspace) m;
4561 } 4556 }
4562 4557
4563 mspace 4558 mspace
4564 create_mspace_with_base (void *base, size_t capacity, int locked) 4559 create_mspace_with_base(void *base, size_t capacity, int locked)
4565 { 4560 {
4566 mstate m = 0; 4561 mstate m = 0;
4567 size_t msize = pad_request (sizeof (struct malloc_state)); 4562 size_t msize = pad_request(sizeof(struct malloc_state));
4568 init_mparams (); /* Ensure pagesize etc initialized */ 4563 init_mparams(); /* Ensure pagesize etc initialized */
4569 4564
4570 if (capacity > msize + TOP_FOOT_SIZE && 4565 if (capacity > msize + TOP_FOOT_SIZE &&
4571 capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) { 4566 capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4572 m = init_user_mstate ((char *) base, capacity); 4567 m = init_user_mstate((char *) base, capacity);
4573 m->seg.sflags = EXTERN_BIT; 4568 m->seg.sflags = EXTERN_BIT;
4574 set_lock (m, locked); 4569 set_lock(m, locked);
4575 } 4570 }
4576 return (mspace) m; 4571 return (mspace) m;
4577 } 4572 }
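/*
   Sketch of an mspace living in caller-provided storage (the arena
   here is hypothetical).  Per the capacity check above, the buffer
   must exceed the internal msize + TOP_FOOT_SIZE overhead; because
   the segment carries EXTERN_BIT, destroy_mspace never unmaps it.
*/
static char arena[64 * 1024];

static void
extern_mspace_demo(void)
{
    mspace ms = create_mspace_with_base(arena, sizeof arena, 0);
    if (ms != 0) {
        void *p = mspace_malloc(ms, 256);
        if (p != 0)
            mspace_free(ms, p);
        destroy_mspace(ms); /* bookkeeping only; arena remains */
    }
}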
4578 4573
4579 size_t 4574 size_t
4580 destroy_mspace (mspace msp) 4575 destroy_mspace(mspace msp)
4581 { 4576 {
4582 size_t freed = 0; 4577 size_t freed = 0;
4583 mstate ms = (mstate) msp; 4578 mstate ms = (mstate) msp;
4584 if (ok_magic (ms)) { 4579 if (ok_magic(ms)) {
4585 msegmentptr sp = &ms->seg; 4580 msegmentptr sp = &ms->seg;
4586 while (sp != 0) { 4581 while (sp != 0) {
4587 char *base = sp->base; 4582 char *base = sp->base;
4588 size_t size = sp->size; 4583 size_t size = sp->size;
4589 flag_t flag = sp->sflags; 4584 flag_t flag = sp->sflags;
4590 sp = sp->next; 4585 sp = sp->next;
4591 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && 4586 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4592 CALL_MUNMAP (base, size) == 0) 4587 CALL_MUNMAP(base, size) == 0)
4593 freed += size; 4588 freed += size;
4594 } 4589 }
4595 } else { 4590 } else {
4596 USAGE_ERROR_ACTION (ms, ms); 4591 USAGE_ERROR_ACTION(ms, ms);
4597 } 4592 }
4598 return freed; 4593 return freed;
4599 } 4594 }
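/*
   Lifecycle sketch for a dynamically created mspace (capacity 0 lets
   the space start at the default granularity and grow on demand; the
   second argument enables locking).  Each mspace is an independent
   heap, so chunks are freed against the space that allocated them,
   and destroy_mspace releases all of its mmapped segments at once.
*/
static void
mspace_demo(void)
{
    mspace ms = create_mspace(0, 0); /* growable, unlocked */
    if (ms != 0) {
        void *p = mspace_malloc(ms, 128);
        if (p != 0)
            mspace_free(ms, p);
        destroy_mspace(ms); /* unmaps every segment it owns */
    }
}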
4600 4595
4601 /* 4596 /*
4602 mspace versions of routines are near-clones of the global 4597 mspace versions of routines are near-clones of the global
4603 versions. This is not so nice but better than the alternatives. 4598 versions. This is not so nice but better than the alternatives.
4604 */ 4599 */
4605 4600
4606 4601
4607 void * 4602 void *
4608 mspace_malloc (mspace msp, size_t bytes) 4603 mspace_malloc(mspace msp, size_t bytes)
4609 { 4604 {
4610 mstate ms = (mstate) msp; 4605 mstate ms = (mstate) msp;
4611 if (!ok_magic (ms)) { 4606 if (!ok_magic(ms)) {
4612 USAGE_ERROR_ACTION (ms, ms); 4607 USAGE_ERROR_ACTION(ms, ms);
4613 return 0; 4608 return 0;
4614 } 4609 }
4615 if (!PREACTION (ms)) { 4610 if (!PREACTION(ms)) {
4616 void *mem; 4611 void *mem;
4617 size_t nb; 4612 size_t nb;
4618 if (bytes <= MAX_SMALL_REQUEST) { 4613 if (bytes <= MAX_SMALL_REQUEST) {
4619 bindex_t idx; 4614 bindex_t idx;
4620 binmap_t smallbits; 4615 binmap_t smallbits;
4621 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes); 4616 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4622 idx = small_index (nb); 4617 idx = small_index(nb);
4623 smallbits = ms->smallmap >> idx; 4618 smallbits = ms->smallmap >> idx;
4624 4619
4625 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ 4620 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4626 mchunkptr b, p; 4621 mchunkptr b, p;
4627 idx += ~smallbits & 1; /* Uses next bin if idx empty */ 4622 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4628 b = smallbin_at (ms, idx); 4623 b = smallbin_at(ms, idx);
4629 p = b->fd; 4624 p = b->fd;
4630 assert (chunksize (p) == small_index2size (idx)); 4625 assert(chunksize(p) == small_index2size(idx));
4631 unlink_first_small_chunk (ms, b, p, idx); 4626 unlink_first_small_chunk(ms, b, p, idx);
4632 set_inuse_and_pinuse (ms, p, small_index2size (idx)); 4627 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4633 mem = chunk2mem (p); 4628 mem = chunk2mem(p);
4634 check_malloced_chunk (ms, mem, nb); 4629 check_malloced_chunk(ms, mem, nb);
4635 goto postaction; 4630 goto postaction;
4636 } 4631 }
4637 4632
4638 else if (nb > ms->dvsize) { 4633 else if (nb > ms->dvsize) {
4639 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ 4634 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4640 mchunkptr b, p, r; 4635 mchunkptr b, p, r;
4641 size_t rsize; 4636 size_t rsize;
4642 bindex_t i; 4637 bindex_t i;
4643 binmap_t leftbits = 4638 binmap_t leftbits =
4644 (smallbits << idx) & left_bits (idx2bit (idx)); 4639 (smallbits << idx) & left_bits(idx2bit(idx));
4645 binmap_t leastbit = least_bit (leftbits); 4640 binmap_t leastbit = least_bit(leftbits);
4646 compute_bit2idx (leastbit, i); 4641 compute_bit2idx(leastbit, i);
4647 b = smallbin_at (ms, i); 4642 b = smallbin_at(ms, i);
4648 p = b->fd; 4643 p = b->fd;
4649 assert (chunksize (p) == small_index2size (i)); 4644 assert(chunksize(p) == small_index2size(i));
4650 unlink_first_small_chunk (ms, b, p, i); 4645 unlink_first_small_chunk(ms, b, p, i);
4651 rsize = small_index2size (i) - nb; 4646 rsize = small_index2size(i) - nb;
4652 /* Fit here cannot be remainderless if 4byte sizes */ 4647 /* Fit here cannot be remainderless if 4byte sizes */
4653 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) 4648 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4654 set_inuse_and_pinuse (ms, p, small_index2size (i)); 4649 set_inuse_and_pinuse(ms, p, small_index2size(i));
4655 else { 4650 else {
4656 set_size_and_pinuse_of_inuse_chunk (ms, p, nb); 4651 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4657 r = chunk_plus_offset (p, nb); 4652 r = chunk_plus_offset(p, nb);
4658 set_size_and_pinuse_of_free_chunk (r, rsize); 4653 set_size_and_pinuse_of_free_chunk(r, rsize);
4659 replace_dv (ms, r, rsize); 4654 replace_dv(ms, r, rsize);
4660 } 4655 }
4661 mem = chunk2mem (p); 4656 mem = chunk2mem(p);
4662 check_malloced_chunk (ms, mem, nb); 4657 check_malloced_chunk(ms, mem, nb);
4663 goto postaction; 4658 goto postaction;
4664 } 4659 }
4665 4660
4666 else if (ms->treemap != 0 4661 else if (ms->treemap != 0
4667 && (mem = tmalloc_small (ms, nb)) != 0) { 4662 && (mem = tmalloc_small(ms, nb)) != 0) {
4668 check_malloced_chunk (ms, mem, nb); 4663 check_malloced_chunk(ms, mem, nb);
4669 goto postaction; 4664 goto postaction;
4670 } 4665 }
4671 } 4666 }
4672 } else if (bytes >= MAX_REQUEST) 4667 } else if (bytes >= MAX_REQUEST)
4673 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ 4668 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4674 else { 4669 else {
4675 nb = pad_request (bytes); 4670 nb = pad_request(bytes);
4676 if (ms->treemap != 0 && (mem = tmalloc_large (ms, nb)) != 0) { 4671 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4677 check_malloced_chunk (ms, mem, nb); 4672 check_malloced_chunk(ms, mem, nb);
4678 goto postaction; 4673 goto postaction;
4679 } 4674 }
4680 } 4675 }
4681 4676
4682 if (nb <= ms->dvsize) { 4677 if (nb <= ms->dvsize) {
4683 size_t rsize = ms->dvsize - nb; 4678 size_t rsize = ms->dvsize - nb;
4684 mchunkptr p = ms->dv; 4679 mchunkptr p = ms->dv;
4685 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ 4680 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4686 mchunkptr r = ms->dv = chunk_plus_offset (p, nb); 4681 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4687 ms->dvsize = rsize; 4682 ms->dvsize = rsize;
4688 set_size_and_pinuse_of_free_chunk (r, rsize); 4683 set_size_and_pinuse_of_free_chunk(r, rsize);
4689 set_size_and_pinuse_of_inuse_chunk (ms, p, nb); 4684 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4690 } else { /* exhaust dv */ 4685 } else { /* exhaust dv */
4691 size_t dvs = ms->dvsize; 4686 size_t dvs = ms->dvsize;
4692 ms->dvsize = 0; 4687 ms->dvsize = 0;
4693 ms->dv = 0; 4688 ms->dv = 0;
4694 set_inuse_and_pinuse (ms, p, dvs); 4689 set_inuse_and_pinuse(ms, p, dvs);
4695 } 4690 }
4696 mem = chunk2mem (p); 4691 mem = chunk2mem(p);
4697 check_malloced_chunk (ms, mem, nb); 4692 check_malloced_chunk(ms, mem, nb);
4698 goto postaction; 4693 goto postaction;
4699 } 4694 }
4700 4695
4701 else if (nb < ms->topsize) { /* Split top */ 4696 else if (nb < ms->topsize) { /* Split top */
4702 size_t rsize = ms->topsize -= nb; 4697 size_t rsize = ms->topsize -= nb;
4703 mchunkptr p = ms->top; 4698 mchunkptr p = ms->top;
4704 mchunkptr r = ms->top = chunk_plus_offset (p, nb); 4699 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4705 r->head = rsize | PINUSE_BIT; 4700 r->head = rsize | PINUSE_BIT;
4706 set_size_and_pinuse_of_inuse_chunk (ms, p, nb); 4701 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4707 mem = chunk2mem (p); 4702 mem = chunk2mem(p);
4708 check_top_chunk (ms, ms->top); 4703 check_top_chunk(ms, ms->top);
4709 check_malloced_chunk (ms, mem, nb); 4704 check_malloced_chunk(ms, mem, nb);
4710 goto postaction; 4705 goto postaction;
4711 } 4706 }
4712 4707
4713 mem = sys_alloc (ms, nb); 4708 mem = sys_alloc(ms, nb);
4714 4709
4715 postaction: 4710 postaction:
4716 POSTACTION (ms); 4711 POSTACTION(ms);
4717 return mem; 4712 return mem;
4718 } 4713 }
4719 4714
4720 return 0; 4715 return 0;
4721 } 4716 }
4722 4717
4723 void 4718 void
4724 mspace_free (mspace msp, void *mem) 4719 mspace_free(mspace msp, void *mem)
4725 { 4720 {
4726 if (mem != 0) { 4721 if (mem != 0) {
4727 mchunkptr p = mem2chunk (mem); 4722 mchunkptr p = mem2chunk(mem);
4728 #if FOOTERS 4723 #if FOOTERS
4729 mstate fm = get_mstate_for (p); 4724 mstate fm = get_mstate_for(p);
4730 #else /* FOOTERS */ 4725 #else /* FOOTERS */
4731 mstate fm = (mstate) msp; 4726 mstate fm = (mstate) msp;
4732 #endif /* FOOTERS */ 4727 #endif /* FOOTERS */
4733 if (!ok_magic (fm)) { 4728 if (!ok_magic(fm)) {
4734 USAGE_ERROR_ACTION (fm, p); 4729 USAGE_ERROR_ACTION(fm, p);
4735 return; 4730 return;
4736 } 4731 }
4737 if (!PREACTION (fm)) { 4732 if (!PREACTION(fm)) {
4738 check_inuse_chunk (fm, p); 4733 check_inuse_chunk(fm, p);
4739 if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) { 4734 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4740 size_t psize = chunksize (p); 4735 size_t psize = chunksize(p);
4741 mchunkptr next = chunk_plus_offset (p, psize); 4736 mchunkptr next = chunk_plus_offset(p, psize);
4742 if (!pinuse (p)) { 4737 if (!pinuse(p)) {
4743 size_t prevsize = p->prev_foot; 4738 size_t prevsize = p->prev_foot;
4744 if ((prevsize & IS_MMAPPED_BIT) != 0) { 4739 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4745 prevsize &= ~IS_MMAPPED_BIT; 4740 prevsize &= ~IS_MMAPPED_BIT;
4746 psize += prevsize + MMAP_FOOT_PAD; 4741 psize += prevsize + MMAP_FOOT_PAD;
4747 if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0) 4742 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4748 fm->footprint -= psize; 4743 fm->footprint -= psize;
4749 goto postaction; 4744 goto postaction;
4750 } else { 4745 } else {
4751 mchunkptr prev = chunk_minus_offset (p, prevsize); 4746 mchunkptr prev = chunk_minus_offset(p, prevsize);
4752 psize += prevsize; 4747 psize += prevsize;
4753 p = prev; 4748 p = prev;
4754 if (RTCHECK (ok_address (fm, prev))) { /* consolidate backward */ 4749 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4755 if (p != fm->dv) { 4750 if (p != fm->dv) {
4756 unlink_chunk (fm, p, prevsize); 4751 unlink_chunk(fm, p, prevsize);
4757 } else if ((next->head & INUSE_BITS) == 4752 } else if ((next->head & INUSE_BITS) ==
4758 INUSE_BITS) { 4753 INUSE_BITS) {
4759 fm->dvsize = psize; 4754 fm->dvsize = psize;
4760 set_free_with_pinuse (p, psize, next); 4755 set_free_with_pinuse(p, psize, next);
4761 goto postaction; 4756 goto postaction;
4762 } 4757 }
4763 } else 4758 } else
4764 goto erroraction; 4759 goto erroraction;
4765 } 4760 }
4766 } 4761 }
4767 4762
4768 if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) { 4763 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4769 if (!cinuse (next)) { /* consolidate forward */ 4764 if (!cinuse(next)) { /* consolidate forward */
4770 if (next == fm->top) { 4765 if (next == fm->top) {
4771 size_t tsize = fm->topsize += psize; 4766 size_t tsize = fm->topsize += psize;
4772 fm->top = p; 4767 fm->top = p;
4773 p->head = tsize | PINUSE_BIT; 4768 p->head = tsize | PINUSE_BIT;
4774 if (p == fm->dv) { 4769 if (p == fm->dv) {
4775 fm->dv = 0; 4770 fm->dv = 0;
4776 fm->dvsize = 0; 4771 fm->dvsize = 0;
4777 } 4772 }
4778 if (should_trim (fm, tsize)) 4773 if (should_trim(fm, tsize))
4779 sys_trim (fm, 0); 4774 sys_trim(fm, 0);
4780 goto postaction; 4775 goto postaction;
4781 } else if (next == fm->dv) { 4776 } else if (next == fm->dv) {
4782 size_t dsize = fm->dvsize += psize; 4777 size_t dsize = fm->dvsize += psize;
4783 fm->dv = p; 4778 fm->dv = p;
4784 set_size_and_pinuse_of_free_chunk (p, dsize); 4779 set_size_and_pinuse_of_free_chunk(p, dsize);
4785 goto postaction; 4780 goto postaction;
4786 } else { 4781 } else {
4787 size_t nsize = chunksize (next); 4782 size_t nsize = chunksize(next);
4788 psize += nsize; 4783 psize += nsize;
4789 unlink_chunk (fm, next, nsize); 4784 unlink_chunk(fm, next, nsize);
4790 set_size_and_pinuse_of_free_chunk (p, psize); 4785 set_size_and_pinuse_of_free_chunk(p, psize);
4791 if (p == fm->dv) { 4786 if (p == fm->dv) {
4792 fm->dvsize = psize; 4787 fm->dvsize = psize;
4793 goto postaction; 4788 goto postaction;
4794 } 4789 }
4795 } 4790 }
4796 } else 4791 } else
4797 set_free_with_pinuse (p, psize, next); 4792 set_free_with_pinuse(p, psize, next);
4798 insert_chunk (fm, p, psize); 4793 insert_chunk(fm, p, psize);
4799 check_free_chunk (fm, p); 4794 check_free_chunk(fm, p);
4800 goto postaction; 4795 goto postaction;
4801 } 4796 }
4802 } 4797 }
4803 erroraction: 4798 erroraction:
4804 USAGE_ERROR_ACTION (fm, p); 4799 USAGE_ERROR_ACTION(fm, p);
4805 postaction: 4800 postaction:
4806 POSTACTION (fm); 4801 POSTACTION(fm);
4807 } 4802 }
4808 } 4803 }
4809 } 4804 }
4810 4805
4811 void * 4806 void *
4812 mspace_calloc (mspace msp, size_t n_elements, size_t elem_size) 4807 mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
4813 { 4808 {
4814 void *mem; 4809 void *mem;
4815 size_t req = 0; 4810 size_t req = 0;
4816 mstate ms = (mstate) msp; 4811 mstate ms = (mstate) msp;
4817 if (!ok_magic (ms)) { 4812 if (!ok_magic(ms)) {
4818 USAGE_ERROR_ACTION (ms, ms); 4813 USAGE_ERROR_ACTION(ms, ms);
4819 return 0; 4814 return 0;
4820 } 4815 }
4821 if (n_elements != 0) { 4816 if (n_elements != 0) {
4822 req = n_elements * elem_size; 4817 req = n_elements * elem_size;
4823 if (((n_elements | elem_size) & ~(size_t) 0xffff) && 4818 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4824 (req / n_elements != elem_size)) 4819 (req / n_elements != elem_size))
4825 req = MAX_SIZE_T; /* force downstream failure on overflow */ 4820 req = MAX_SIZE_T; /* force downstream failure on overflow */
4826 } 4821 }
4827 mem = internal_malloc (ms, req); 4822 mem = internal_malloc(ms, req);
4828 if (mem != 0 && calloc_must_clear (mem2chunk (mem))) 4823 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4829 memset (mem, 0, req); 4824 memset(mem, 0, req);
4830 return mem; 4825 return mem;
4831 } 4826 }
4832 4827
4833 void * 4828 void *
4834 mspace_realloc (mspace msp, void *oldmem, size_t bytes) 4829 mspace_realloc(mspace msp, void *oldmem, size_t bytes)
4835 { 4830 {
4836 if (oldmem == 0) 4831 if (oldmem == 0)
4837 return mspace_malloc (msp, bytes); 4832 return mspace_malloc(msp, bytes);
4838 #ifdef REALLOC_ZERO_BYTES_FREES 4833 #ifdef REALLOC_ZERO_BYTES_FREES
4839 if (bytes == 0) { 4834 if (bytes == 0) {
4840 mspace_free (msp, oldmem); 4835 mspace_free(msp, oldmem);
4841 return 0; 4836 return 0;
4842 } 4837 }
4843 #endif /* REALLOC_ZERO_BYTES_FREES */ 4838 #endif /* REALLOC_ZERO_BYTES_FREES */
4844 else { 4839 else {
4845 #if FOOTERS 4840 #if FOOTERS
4846 mchunkptr p = mem2chunk (oldmem); 4841 mchunkptr p = mem2chunk(oldmem);
4847 mstate ms = get_mstate_for (p); 4842 mstate ms = get_mstate_for(p);
4848 #else /* FOOTERS */ 4843 #else /* FOOTERS */
4849 mstate ms = (mstate) msp; 4844 mstate ms = (mstate) msp;
4850 #endif /* FOOTERS */ 4845 #endif /* FOOTERS */
4851 if (!ok_magic (ms)) { 4846 if (!ok_magic(ms)) {
4852 USAGE_ERROR_ACTION (ms, ms); 4847 USAGE_ERROR_ACTION(ms, ms);
4853 return 0; 4848 return 0;
4854 } 4849 }
4855 return internal_realloc (ms, oldmem, bytes); 4850 return internal_realloc(ms, oldmem, bytes);
4856 } 4851 }
4857 } 4852 }
4858 4853
4859 void * 4854 void *
4860 mspace_memalign (mspace msp, size_t alignment, size_t bytes) 4855 mspace_memalign(mspace msp, size_t alignment, size_t bytes)
4861 { 4856 {
4862 mstate ms = (mstate) msp; 4857 mstate ms = (mstate) msp;
4863 if (!ok_magic (ms)) { 4858 if (!ok_magic(ms)) {
4864 USAGE_ERROR_ACTION (ms, ms); 4859 USAGE_ERROR_ACTION(ms, ms);
4865 return 0; 4860 return 0;
4866 } 4861 }
4867 return internal_memalign (ms, alignment, bytes); 4862 return internal_memalign(ms, alignment, bytes);
4868 } 4863 }
4869 4864
4870 void ** 4865 void **
4871 mspace_independent_calloc (mspace msp, size_t n_elements, 4866 mspace_independent_calloc(mspace msp, size_t n_elements,
4872 size_t elem_size, void *chunks[]) 4867 size_t elem_size, void *chunks[])
4873 { 4868 {
4874 size_t sz = elem_size; /* serves as 1-element array */ 4869 size_t sz = elem_size; /* serves as 1-element array */
4875 mstate ms = (mstate) msp; 4870 mstate ms = (mstate) msp;
4876 if (!ok_magic (ms)) { 4871 if (!ok_magic(ms)) {
4877 USAGE_ERROR_ACTION (ms, ms); 4872 USAGE_ERROR_ACTION(ms, ms);
4878 return 0; 4873 return 0;
4879 } 4874 }
4880 return ialloc (ms, n_elements, &sz, 3, chunks); 4875 return ialloc(ms, n_elements, &sz, 3, chunks);
4881 } 4876 }
4882 4877
4883 void ** 4878 void **
4884 mspace_independent_comalloc (mspace msp, size_t n_elements, 4879 mspace_independent_comalloc(mspace msp, size_t n_elements,
4885 size_t sizes[], void *chunks[]) 4880 size_t sizes[], void *chunks[])
4886 { 4881 {
4887 mstate ms = (mstate) msp; 4882 mstate ms = (mstate) msp;
4888 if (!ok_magic (ms)) { 4883 if (!ok_magic(ms)) {
4889 USAGE_ERROR_ACTION (ms, ms); 4884 USAGE_ERROR_ACTION(ms, ms);
4890 return 0; 4885 return 0;
4891 } 4886 }
4892 return ialloc (ms, n_elements, sizes, 0, chunks); 4887 return ialloc(ms, n_elements, sizes, 0, chunks);
4893 } 4888 }
4894 4889
4895 int 4890 int
4896 mspace_trim (mspace msp, size_t pad) 4891 mspace_trim(mspace msp, size_t pad)
4897 { 4892 {
4898 int result = 0; 4893 int result = 0;
4899 mstate ms = (mstate) msp; 4894 mstate ms = (mstate) msp;
4900 if (ok_magic (ms)) { 4895 if (ok_magic(ms)) {
4901 if (!PREACTION (ms)) { 4896 if (!PREACTION(ms)) {
4902 result = sys_trim (ms, pad); 4897 result = sys_trim(ms, pad);
4903 POSTACTION (ms); 4898 POSTACTION(ms);
4904 } 4899 }
4905 } else { 4900 } else {
4906 USAGE_ERROR_ACTION (ms, ms); 4901 USAGE_ERROR_ACTION(ms, ms);
4907 } 4902 }
4908 return result; 4903 return result;
4909 } 4904 }
4910 4905
4911 void 4906 void
4912 mspace_malloc_stats (mspace msp) 4907 mspace_malloc_stats(mspace msp)
4913 { 4908 {
4914 mstate ms = (mstate) msp; 4909 mstate ms = (mstate) msp;
4915 if (ok_magic (ms)) { 4910 if (ok_magic(ms)) {
4916 internal_malloc_stats (ms); 4911 internal_malloc_stats(ms);
4917 } else { 4912 } else {
4918 USAGE_ERROR_ACTION (ms, ms); 4913 USAGE_ERROR_ACTION(ms, ms);
4919 } 4914 }
4920 } 4915 }
4921 4916
4922 size_t 4917 size_t
4923 mspace_footprint (mspace msp) 4918 mspace_footprint(mspace msp)
4924 { 4919 {
4925 size_t result = 0; 4920 size_t result = 0;
4926 mstate ms = (mstate) msp; 4921 mstate ms = (mstate) msp;
4927 if (ok_magic (ms)) { 4922 if (ok_magic(ms)) {
4928 result = ms->footprint; 4923 result = ms->footprint;
4929 } else 4924 } else
4930 USAGE_ERROR_ACTION (ms, ms); 4925 USAGE_ERROR_ACTION(ms, ms);
4931 return result; 4926 return result;
4932 } 4927 }
4933 4928
4934 4929
4935 size_t 4930 size_t
4936 mspace_max_footprint (mspace msp) 4931 mspace_max_footprint(mspace msp)
4937 { 4932 {
4938 size_t result = 0; 4933 size_t result = 0;
4939 mstate ms = (mstate) msp; 4934 mstate ms = (mstate) msp;
4940 if (ok_magic (ms)) { 4935 if (ok_magic(ms)) {
4941 result = ms->max_footprint; 4936 result = ms->max_footprint;
4942 } else 4937 } else
4943 USAGE_ERROR_ACTION (ms, ms); 4938 USAGE_ERROR_ACTION(ms, ms);
4944 return result; 4939 return result;
4945 } 4940 }
4946 4941
4947 4942
4948 #if !NO_MALLINFO 4943 #if !NO_MALLINFO
4949 struct mallinfo 4944 struct mallinfo
4950 mspace_mallinfo (mspace msp) 4945 mspace_mallinfo(mspace msp)
4951 { 4946 {
4952 mstate ms = (mstate) msp; 4947 mstate ms = (mstate) msp;
4953 if (!ok_magic (ms)) { 4948 if (!ok_magic(ms)) {
4954 USAGE_ERROR_ACTION (ms, ms); 4949 USAGE_ERROR_ACTION(ms, ms);
4955 } 4950 }
4956 return internal_mallinfo (ms); 4951 return internal_mallinfo(ms);
4957 } 4952 }
4958 #endif /* NO_MALLINFO */ 4953 #endif /* NO_MALLINFO */
4959 4954
4960 int 4955 int
4961 mspace_mallopt (int param_number, int value) 4956 mspace_mallopt(int param_number, int value)
4962 { 4957 {
4963 return change_mparam (param_number, value); 4958 return change_mparam(param_number, value);
4964 } 4959 }
4965 4960
4966 #endif /* MSPACES */ 4961 #endif /* MSPACES */
4967 4962
4968 /* -------------------- Alternative MORECORE functions ------------------- */ 4963 /* -------------------- Alternative MORECORE functions ------------------- */