comparison src/stdlib/SDL_malloc.c @ 1662:782fd950bd46 SDL-1.3

Revamp of the video system in progress - adding support for multiple displays,
multiple windows, and a full video mode selection API.

WARNING: None of the video drivers have been updated for the new API yet! The API
is still under design and very fluid.

The code is now run through a consistent indent format:
    indent -i4 -nut -nsc -br -ce

The headers are being converted to automatically generate doxygen documentation.
author Sam Lantinga <slouken@libsdl.org>
date Sun, 28 May 2006 13:04:16 +0000
parents 8dfa9a6d69a5
children 4da1ee79c9af
comparison
1661:281d3f4870e5 1662:782fd950bd46
475 */ 475 */
476 476
477 #ifndef WIN32 477 #ifndef WIN32
478 #ifdef _WIN32 478 #ifdef _WIN32
479 #define WIN32 1 479 #define WIN32 1
480 #endif /* _WIN32 */ 480 #endif /* _WIN32 */
481 #endif /* WIN32 */ 481 #endif /* WIN32 */
482 #ifdef WIN32 482 #ifdef WIN32
483 #define WIN32_LEAN_AND_MEAN 483 #define WIN32_LEAN_AND_MEAN
484 #include <windows.h> 484 #include <windows.h>
485 #define HAVE_MMAP 1 485 #define HAVE_MMAP 1
486 #define HAVE_MORECORE 0 486 #define HAVE_MORECORE 0
489 #define LACKS_SYS_MMAN_H 489 #define LACKS_SYS_MMAN_H
490 #define LACKS_STRING_H 490 #define LACKS_STRING_H
491 #define LACKS_STRINGS_H 491 #define LACKS_STRINGS_H
492 #define LACKS_SYS_TYPES_H 492 #define LACKS_SYS_TYPES_H
493 #define LACKS_ERRNO_H 493 #define LACKS_ERRNO_H
494 #define LACKS_FCNTL_H 494 #define LACKS_FCNTL_H
495 #define MALLOC_FAILURE_ACTION 495 #define MALLOC_FAILURE_ACTION
496 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ 496 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
497 #endif /* WIN32 */ 497 #endif /* WIN32 */
498 498
499 #if defined(DARWIN) || defined(_DARWIN) 499 #if defined(DARWIN) || defined(_DARWIN)
500 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ 500 /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
501 #ifndef HAVE_MORECORE 501 #ifndef HAVE_MORECORE
502 #define HAVE_MORECORE 0 502 #define HAVE_MORECORE 0
503 #define HAVE_MMAP 1 503 #define HAVE_MMAP 1
504 #endif /* HAVE_MORECORE */ 504 #endif /* HAVE_MORECORE */
505 #endif /* DARWIN */ 505 #endif /* DARWIN */
506 506
507 #ifndef LACKS_SYS_TYPES_H 507 #ifndef LACKS_SYS_TYPES_H
508 #include <sys/types.h> /* For size_t */ 508 #include <sys/types.h> /* For size_t */
509 #endif /* LACKS_SYS_TYPES_H */ 509 #endif /* LACKS_SYS_TYPES_H */
510 510
511 /* The maximum possible size_t value has all bits set */ 511 /* The maximum possible size_t value has all bits set */
512 #define MAX_SIZE_T (~(size_t)0) 512 #define MAX_SIZE_T (~(size_t)0)
513 513
514 #ifndef ONLY_MSPACES 514 #ifndef ONLY_MSPACES
515 #define ONLY_MSPACES 0 515 #define ONLY_MSPACES 0
516 #endif /* ONLY_MSPACES */ 516 #endif /* ONLY_MSPACES */
517 #ifndef MSPACES 517 #ifndef MSPACES
518 #if ONLY_MSPACES 518 #if ONLY_MSPACES
519 #define MSPACES 1 519 #define MSPACES 1
520 #else /* ONLY_MSPACES */ 520 #else /* ONLY_MSPACES */
521 #define MSPACES 0 521 #define MSPACES 0
522 #endif /* ONLY_MSPACES */ 522 #endif /* ONLY_MSPACES */
523 #endif /* MSPACES */ 523 #endif /* MSPACES */
524 #ifndef MALLOC_ALIGNMENT 524 #ifndef MALLOC_ALIGNMENT
525 #define MALLOC_ALIGNMENT ((size_t)8U) 525 #define MALLOC_ALIGNMENT ((size_t)8U)
526 #endif /* MALLOC_ALIGNMENT */ 526 #endif /* MALLOC_ALIGNMENT */
527 #ifndef FOOTERS 527 #ifndef FOOTERS
528 #define FOOTERS 0 528 #define FOOTERS 0
529 #endif /* FOOTERS */ 529 #endif /* FOOTERS */
530 #ifndef ABORT 530 #ifndef ABORT
531 #define ABORT abort() 531 #define ABORT abort()
532 #endif /* ABORT */ 532 #endif /* ABORT */
533 #ifndef ABORT_ON_ASSERT_FAILURE 533 #ifndef ABORT_ON_ASSERT_FAILURE
534 #define ABORT_ON_ASSERT_FAILURE 1 534 #define ABORT_ON_ASSERT_FAILURE 1
535 #endif /* ABORT_ON_ASSERT_FAILURE */ 535 #endif /* ABORT_ON_ASSERT_FAILURE */
536 #ifndef PROCEED_ON_ERROR 536 #ifndef PROCEED_ON_ERROR
537 #define PROCEED_ON_ERROR 0 537 #define PROCEED_ON_ERROR 0
538 #endif /* PROCEED_ON_ERROR */ 538 #endif /* PROCEED_ON_ERROR */
539 #ifndef USE_LOCKS 539 #ifndef USE_LOCKS
540 #define USE_LOCKS 0 540 #define USE_LOCKS 0
541 #endif /* USE_LOCKS */ 541 #endif /* USE_LOCKS */
542 #ifndef INSECURE 542 #ifndef INSECURE
543 #define INSECURE 0 543 #define INSECURE 0
544 #endif /* INSECURE */ 544 #endif /* INSECURE */
545 #ifndef HAVE_MMAP 545 #ifndef HAVE_MMAP
546 #define HAVE_MMAP 1 546 #define HAVE_MMAP 1
547 #endif /* HAVE_MMAP */ 547 #endif /* HAVE_MMAP */
548 #ifndef MMAP_CLEARS 548 #ifndef MMAP_CLEARS
549 #define MMAP_CLEARS 1 549 #define MMAP_CLEARS 1
550 #endif /* MMAP_CLEARS */ 550 #endif /* MMAP_CLEARS */
551 #ifndef HAVE_MREMAP 551 #ifndef HAVE_MREMAP
552 #ifdef linux 552 #ifdef linux
553 #define HAVE_MREMAP 1 553 #define HAVE_MREMAP 1
554 #else /* linux */ 554 #else /* linux */
555 #define HAVE_MREMAP 0 555 #define HAVE_MREMAP 0
556 #endif /* linux */ 556 #endif /* linux */
557 #endif /* HAVE_MREMAP */ 557 #endif /* HAVE_MREMAP */
558 #ifndef MALLOC_FAILURE_ACTION 558 #ifndef MALLOC_FAILURE_ACTION
559 #define MALLOC_FAILURE_ACTION errno = ENOMEM; 559 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
560 #endif /* MALLOC_FAILURE_ACTION */ 560 #endif /* MALLOC_FAILURE_ACTION */
561 #ifndef HAVE_MORECORE 561 #ifndef HAVE_MORECORE
562 #if ONLY_MSPACES 562 #if ONLY_MSPACES
563 #define HAVE_MORECORE 0 563 #define HAVE_MORECORE 0
564 #else /* ONLY_MSPACES */ 564 #else /* ONLY_MSPACES */
565 #define HAVE_MORECORE 1 565 #define HAVE_MORECORE 1
566 #endif /* ONLY_MSPACES */ 566 #endif /* ONLY_MSPACES */
567 #endif /* HAVE_MORECORE */ 567 #endif /* HAVE_MORECORE */
568 #if !HAVE_MORECORE 568 #if !HAVE_MORECORE
569 #define MORECORE_CONTIGUOUS 0 569 #define MORECORE_CONTIGUOUS 0
570 #else /* !HAVE_MORECORE */ 570 #else /* !HAVE_MORECORE */
571 #ifndef MORECORE 571 #ifndef MORECORE
572 #define MORECORE sbrk 572 #define MORECORE sbrk
573 #endif /* MORECORE */ 573 #endif /* MORECORE */
574 #ifndef MORECORE_CONTIGUOUS 574 #ifndef MORECORE_CONTIGUOUS
575 #define MORECORE_CONTIGUOUS 1 575 #define MORECORE_CONTIGUOUS 1
576 #endif /* MORECORE_CONTIGUOUS */ 576 #endif /* MORECORE_CONTIGUOUS */
577 #endif /* HAVE_MORECORE */ 577 #endif /* HAVE_MORECORE */
578 #ifndef DEFAULT_GRANULARITY 578 #ifndef DEFAULT_GRANULARITY
579 #if MORECORE_CONTIGUOUS 579 #if MORECORE_CONTIGUOUS
580 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ 580 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
581 #else /* MORECORE_CONTIGUOUS */ 581 #else /* MORECORE_CONTIGUOUS */
582 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) 582 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
583 #endif /* MORECORE_CONTIGUOUS */ 583 #endif /* MORECORE_CONTIGUOUS */
584 #endif /* DEFAULT_GRANULARITY */ 584 #endif /* DEFAULT_GRANULARITY */
585 #ifndef DEFAULT_TRIM_THRESHOLD 585 #ifndef DEFAULT_TRIM_THRESHOLD
586 #ifndef MORECORE_CANNOT_TRIM 586 #ifndef MORECORE_CANNOT_TRIM
587 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) 587 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
588 #else /* MORECORE_CANNOT_TRIM */ 588 #else /* MORECORE_CANNOT_TRIM */
589 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T 589 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
590 #endif /* MORECORE_CANNOT_TRIM */ 590 #endif /* MORECORE_CANNOT_TRIM */
591 #endif /* DEFAULT_TRIM_THRESHOLD */ 591 #endif /* DEFAULT_TRIM_THRESHOLD */
592 #ifndef DEFAULT_MMAP_THRESHOLD 592 #ifndef DEFAULT_MMAP_THRESHOLD
593 #if HAVE_MMAP 593 #if HAVE_MMAP
594 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) 594 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
595 #else /* HAVE_MMAP */ 595 #else /* HAVE_MMAP */
596 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T 596 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
597 #endif /* HAVE_MMAP */ 597 #endif /* HAVE_MMAP */
598 #endif /* DEFAULT_MMAP_THRESHOLD */ 598 #endif /* DEFAULT_MMAP_THRESHOLD */
599 #ifndef USE_BUILTIN_FFS 599 #ifndef USE_BUILTIN_FFS
600 #define USE_BUILTIN_FFS 0 600 #define USE_BUILTIN_FFS 0
601 #endif /* USE_BUILTIN_FFS */ 601 #endif /* USE_BUILTIN_FFS */
602 #ifndef USE_DEV_RANDOM 602 #ifndef USE_DEV_RANDOM
603 #define USE_DEV_RANDOM 0 603 #define USE_DEV_RANDOM 0
604 #endif /* USE_DEV_RANDOM */ 604 #endif /* USE_DEV_RANDOM */
605 #ifndef NO_MALLINFO 605 #ifndef NO_MALLINFO
606 #define NO_MALLINFO 0 606 #define NO_MALLINFO 0
607 #endif /* NO_MALLINFO */ 607 #endif /* NO_MALLINFO */
608 #ifndef MALLINFO_FIELD_TYPE 608 #ifndef MALLINFO_FIELD_TYPE
609 #define MALLINFO_FIELD_TYPE size_t 609 #define MALLINFO_FIELD_TYPE size_t
610 #endif /* MALLINFO_FIELD_TYPE */ 610 #endif /* MALLINFO_FIELD_TYPE */
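Every option above is wrapped in an #ifndef guard, so a port can override the defaults from its build system instead of editing this file. A hedged sketch of such a configuration follows; the flags are purely illustrative and are not SDL's actual build settings.

    /* Hypothetical compiler invocation overriding a few of the defaults above:
     *
     *   cc -c SDL_malloc.c \
     *      -DUSE_LOCKS=1                                    (serialize the global state) \
     *      -DFOOTERS=1                                      (per-chunk trailer checks)   \
     *      -DDEFAULT_MMAP_THRESHOLD='((size_t)1024U*1024U)' (mmap requests of 1MB+)
     */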
611 611
612 #define memset SDL_memset 612 #define memset SDL_memset
613 #define memcpy SDL_memcpy 613 #define memcpy SDL_memcpy
614 #define malloc SDL_malloc 614 #define malloc SDL_malloc
615 #define calloc SDL_calloc 615 #define calloc SDL_calloc
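The defines above (the rest of the run is elided from this hunk) reroute the allocator's internal C library calls onto SDL's own routines, so the remainder of the file compiles against SDL_memset, SDL_malloc and friends unchanged. A minimal sketch, assuming the SDL functions keep the standard signatures:

    static void example_zero(void *p, size_t n)
    {
        memset(p, 0, n);    /* after the #define above this is SDL_memset(p, 0, n) */
    }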
656 656
657 #ifdef HAVE_USR_INCLUDE_MALLOC_H 657 #ifdef HAVE_USR_INCLUDE_MALLOC_H
658 #include "/usr/include/malloc.h" 658 #include "/usr/include/malloc.h"
659 #else /* HAVE_USR_INCLUDE_MALLOC_H */ 659 #else /* HAVE_USR_INCLUDE_MALLOC_H */
660 660
661 struct mallinfo { 661 struct mallinfo
662 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ 662 {
663 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ 663 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
664 MALLINFO_FIELD_TYPE smblks; /* always 0 */ 664 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
665 MALLINFO_FIELD_TYPE hblks; /* always 0 */ 665 MALLINFO_FIELD_TYPE smblks; /* always 0 */
666 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ 666 MALLINFO_FIELD_TYPE hblks; /* always 0 */
667 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ 667 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
668 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ 668 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
669 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ 669 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
670 MALLINFO_FIELD_TYPE fordblks; /* total free space */ 670 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
671 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ 671 MALLINFO_FIELD_TYPE fordblks; /* total free space */
672 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
672 }; 673 };
673 674
674 #endif /* HAVE_USR_INCLUDE_MALLOC_H */ 675 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
675 #endif /* NO_MALLINFO */ 676 #endif /* NO_MALLINFO */
676 677
677 #ifdef __cplusplus 678 #ifdef __cplusplus
678 extern "C" { 679 extern "C"
679 #endif /* __cplusplus */ 680 {
681 #endif /* __cplusplus */
680 682
681 #if !ONLY_MSPACES 683 #if !ONLY_MSPACES
682 684
683 /* ------------------- Declarations of public routines ------------------- */ 685 /* ------------------- Declarations of public routines ------------------- */
684 686
697 #define dlmalloc_usable_size malloc_usable_size 699 #define dlmalloc_usable_size malloc_usable_size
698 #define dlmalloc_footprint malloc_footprint 700 #define dlmalloc_footprint malloc_footprint
699 #define dlmalloc_max_footprint malloc_max_footprint 701 #define dlmalloc_max_footprint malloc_max_footprint
700 #define dlindependent_calloc independent_calloc 702 #define dlindependent_calloc independent_calloc
701 #define dlindependent_comalloc independent_comalloc 703 #define dlindependent_comalloc independent_comalloc
702 #endif /* USE_DL_PREFIX */ 704 #endif /* USE_DL_PREFIX */
703 705
704 706
705 /* 707 /*
706 malloc(size_t n) 708 malloc(size_t n)
707 Returns a pointer to a newly allocated chunk of at least n bytes, or 709 Returns a pointer to a newly allocated chunk of at least n bytes, or
714 arguments that would be negative if signed are interpreted as 716 arguments that would be negative if signed are interpreted as
715 requests for huge amounts of space, which will often fail. The 717 requests for huge amounts of space, which will often fail. The
716 maximum supported value of n differs across systems, but is in all 718 maximum supported value of n differs across systems, but is in all
717 cases less than the maximum representable value of a size_t. 719 cases less than the maximum representable value of a size_t.
718 */ 720 */
719 void* dlmalloc(size_t); 721 void *dlmalloc (size_t);
720 722
721 /* 723 /*
722 free(void* p) 724 free(void* p)
723 Releases the chunk of memory pointed to by p, that had been previously 725 Releases the chunk of memory pointed to by p, that had been previously
724 allocated using malloc or a related routine such as realloc. 726 allocated using malloc or a related routine such as realloc.
725 It has no effect if p is null. If p was not malloced or already 727 It has no effect if p is null. If p was not malloced or already
726 freed, free(p) will by default cause the current program to abort. 728 freed, free(p) will by default cause the current program to abort.
727 */ 729 */
728 void dlfree(void*); 730 void dlfree (void *);
729 731
730 /* 732 /*
731 calloc(size_t n_elements, size_t element_size); 733 calloc(size_t n_elements, size_t element_size);
732 Returns a pointer to n_elements * element_size bytes, with all locations 734 Returns a pointer to n_elements * element_size bytes, with all locations
733 set to zero. 735 set to zero.
734 */ 736 */
735 void* dlcalloc(size_t, size_t); 737 void *dlcalloc (size_t, size_t);
736 738
737 /* 739 /*
738 realloc(void* p, size_t n) 740 realloc(void* p, size_t n)
739 Returns a pointer to a chunk of size n that contains the same data 741 Returns a pointer to a chunk of size n that contains the same data
740 as does chunk p up to the minimum of (n, p's size) bytes, or null 742 as does chunk p up to the minimum of (n, p's size) bytes, or null
755 757
756 The old unix realloc convention of allowing the last-free'd chunk 758 The old unix realloc convention of allowing the last-free'd chunk
757 to be used as an argument to realloc is not supported. 759 to be used as an argument to realloc is not supported.
758 */ 760 */
759 761
760 void* dlrealloc(void*, size_t); 762 void *dlrealloc (void *, size_t);
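A short usage sketch of the core entry points declared so far (dlmalloc, dlrealloc, dlfree); the sizes are arbitrary and the snippet is illustrative only.

    char *buf = dlmalloc(64);                   /* at least 64 usable bytes, or null */
    if (buf != 0) {
        char *bigger = dlrealloc(buf, 256);     /* contents preserved up to min(64, 256) */
        if (bigger != 0)
            buf = bigger;                       /* on failure the original block is left intact */
        dlfree(buf);                            /* dlfree(0) would be a harmless no-op */
    }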
761 763
762 /* 764 /*
763 memalign(size_t alignment, size_t n); 765 memalign(size_t alignment, size_t n);
764 Returns a pointer to a newly allocated chunk of n bytes, aligned 766 Returns a pointer to a newly allocated chunk of n bytes, aligned
765 in accord with the alignment argument. 767 in accord with the alignment argument.
769 8-byte alignment is guaranteed by normal malloc calls, so don't 771 8-byte alignment is guaranteed by normal malloc calls, so don't
770 bother calling memalign with an argument of 8 or less. 772 bother calling memalign with an argument of 8 or less.
771 773
772 Overreliance on memalign is a sure way to fragment space. 774 Overreliance on memalign is a sure way to fragment space.
773 */ 775 */
774 void* dlmemalign(size_t, size_t); 776 void *dlmemalign (size_t, size_t);
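A small sketch of dlmemalign; the 64-byte alignment is an arbitrary example, and per the comment above anything at or below the default 8-byte alignment is pointless.

    void *aligned = dlmemalign(64, 1000);       /* 1000 bytes starting on a 64-byte boundary */
    if (aligned != 0)
        dlfree(aligned);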
775 777
776 /* 778 /*
777 valloc(size_t n); 779 valloc(size_t n);
778 Equivalent to memalign(pagesize, n), where pagesize is the page 780 Equivalent to memalign(pagesize, n), where pagesize is the page
779 size of the system. If the pagesize is unknown, 4096 is used. 781 size of the system. If the pagesize is unknown, 4096 is used.
780 */ 782 */
781 void* dlvalloc(size_t); 783 void *dlvalloc (size_t);
782 784
783 /* 785 /*
784 mallopt(int parameter_number, int parameter_value) 786 mallopt(int parameter_number, int parameter_value)
785 Sets tunable parameters. The format is to provide a 787 Sets tunable parameters. The format is to provide a
786 (parameter-number, parameter-value) pair. mallopt then sets the 788 (parameter-number, parameter-value) pair. mallopt then sets the
796 Symbol param # default allowed param values 798 Symbol param # default allowed param values
797 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) 799 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
798 M_GRANULARITY -2 page size any power of 2 >= page size 800 M_GRANULARITY -2 page size any power of 2 >= page size
799 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) 801 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
800 */ 802 */
801 int dlmallopt(int, int); 803 int dlmallopt (int, int);
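A sketch of dlmallopt using the parameter numbers from the table above; the values are illustrative. The symbolic names themselves (M_TRIM_THRESHOLD and so on) are defined in a part of the file not shown in this hunk.

    dlmallopt(-2 /* M_GRANULARITY */, 64 * 1024);       /* must be a power of 2 >= page size */
    dlmallopt(-3 /* M_MMAP_THRESHOLD */, 1024 * 1024);  /* mmap requests of 1MB and above */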
802 804
803 /* 805 /*
804 malloc_footprint(); 806 malloc_footprint();
805 Returns the number of bytes obtained from the system. The total 807 Returns the number of bytes obtained from the system. The total
806 number of bytes allocated by malloc, realloc etc., is less than this 808 number of bytes allocated by malloc, realloc etc., is less than this
807 value. Unlike mallinfo, this function returns only a precomputed 809 value. Unlike mallinfo, this function returns only a precomputed
808 result, so can be called frequently to monitor memory consumption. 810 result, so can be called frequently to monitor memory consumption.
809 Even if locks are otherwise defined, this function does not use them, 811 Even if locks are otherwise defined, this function does not use them,
810 so results might not be up to date. 812 so results might not be up to date.
811 */ 813 */
812 size_t dlmalloc_footprint(void); 814 size_t dlmalloc_footprint (void);
813 815
814 /* 816 /*
815 malloc_max_footprint(); 817 malloc_max_footprint();
816 Returns the maximum number of bytes obtained from the system. This 818 Returns the maximum number of bytes obtained from the system. This
817 value will be greater than current footprint if deallocated space 819 value will be greater than current footprint if deallocated space
820 this function returns only a precomputed result, so can be called 822 this function returns only a precomputed result, so can be called
821 frequently to monitor memory consumption. Even if locks are 823 frequently to monitor memory consumption. Even if locks are
822 otherwise defined, this function does not use them, so results might 824 otherwise defined, this function does not use them, so results might
823 not be up to date. 825 not be up to date.
824 */ 826 */
825 size_t dlmalloc_max_footprint(void); 827 size_t dlmalloc_max_footprint (void);
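A sketch of footprint monitoring; both calls only read precomputed counters, so polling them frequently is cheap, though as noted above the values may lag when locks are in use.

    size_t now  = dlmalloc_footprint();         /* bytes currently obtained from the system */
    size_t peak = dlmalloc_max_footprint();     /* high-water mark of that figure */
    (void)now; (void)peak;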
826 828
827 #if !NO_MALLINFO 829 #if !NO_MALLINFO
828 /* 830 /*
829 mallinfo() 831 mallinfo()
830 Returns (by copy) a struct containing various summary statistics: 832 Returns (by copy) a struct containing various summary statistics:
845 847
846 Because these fields are ints, but internal bookkeeping may 848 Because these fields are ints, but internal bookkeeping may
847 be kept as longs, the reported values may wrap around zero and 849 be kept as longs, the reported values may wrap around zero and
848 thus be inaccurate. 850 thus be inaccurate.
849 */ 851 */
850 struct mallinfo dlmallinfo(void); 852 struct mallinfo dlmallinfo (void);
851 #endif /* NO_MALLINFO */ 853 #endif /* NO_MALLINFO */
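A sketch of reading the dlmallinfo summary; the field meanings follow the struct mallinfo comments earlier in this file, and MALLINFO_FIELD_TYPE defaults to size_t here.

    struct mallinfo mi = dlmallinfo();
    size_t in_use = mi.uordblks;                /* total allocated space */
    size_t unused = mi.fordblks;                /* total free space */
    size_t mapped = mi.hblkhd;                  /* space in mmapped regions */
    (void)in_use; (void)unused; (void)mapped;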
852 854
853 /* 855 /*
854 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); 856 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
855 857
856 independent_calloc is similar to calloc, but instead of returning a 858 independent_calloc is similar to calloc, but instead of returning a
900 pool[i]->next = pool[i+1]; 902 pool[i]->next = pool[i+1];
901 free(pool); // Can now free the array (or not, if it is needed later) 903 free(pool); // Can now free the array (or not, if it is needed later)
902 return first; 904 return first;
903 } 905 }
904 */ 906 */
905 void** dlindependent_calloc(size_t, size_t, void**); 907 void **dlindependent_calloc (size_t, size_t, void **);
906 908
907 /* 909 /*
908 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); 910 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
909 911
910 independent_comalloc allocates, all at once, a set of n_elements 912 independent_comalloc allocates, all at once, a set of n_elements
961 963
962 Overuse of independent_comalloc can increase overall memory usage, 964 Overuse of independent_comalloc can increase overall memory usage,
963 since it cannot reuse existing noncontiguous small chunks that 965 since it cannot reuse existing noncontiguous small chunks that
964 might be available for some of the elements. 966 might be available for some of the elements.
965 */ 967 */
966 void** dlindependent_comalloc(size_t, size_t*, void**); 968 void **dlindependent_comalloc (size_t, size_t *, void **);
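Most of the explanatory comment for independent_comalloc falls outside this hunk; a minimal sketch of the call shape, with arbitrary element sizes, is:

    size_t sizes[2];
    void  *chunks[2];
    sizes[0] = 64;                              /* e.g. a small header object */
    sizes[1] = 1024;                            /* e.g. its payload buffer */
    if (dlindependent_comalloc(2, sizes, chunks) != 0) {
        void *header = chunks[0];
        void *body   = chunks[1];
        dlfree(header);                         /* each element is an ordinary, individually */
        dlfree(body);                           /* freeable chunk */
    }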
967 969
968 970
969 /* 971 /*
970 pvalloc(size_t n); 972 pvalloc(size_t n);
971 Equivalent to valloc(minimum-page-that-holds(n)), that is, 973 Equivalent to valloc(minimum-page-that-holds(n)), that is,
972 round up n to nearest pagesize. 974 round up n to nearest pagesize.
973 */ 975 */
974 void* dlpvalloc(size_t); 976 void *dlpvalloc (size_t);
975 977
976 /* 978 /*
977 malloc_trim(size_t pad); 979 malloc_trim(size_t pad);
978 980
979 If possible, gives memory back to the system (via negative arguments 981 If possible, gives memory back to the system (via negative arguments
992 trailing space to service future expected allocations without having 994 trailing space to service future expected allocations without having
993 to re-obtain memory from the system. 995 to re-obtain memory from the system.
994 996
995 Malloc_trim returns 1 if it actually released any memory, else 0. 997 Malloc_trim returns 1 if it actually released any memory, else 0.
996 */ 998 */
997 int dlmalloc_trim(size_t); 999 int dlmalloc_trim (size_t);
998 1000
999 /* 1001 /*
1000 malloc_usable_size(void* p); 1002 malloc_usable_size(void* p);
1001 1003
1002 Returns the number of bytes you can actually use in 1004 Returns the number of bytes you can actually use in
1008 debugging and assertions, for example: 1010 debugging and assertions, for example:
1009 1011
1010 p = malloc(n); 1012 p = malloc(n);
1011 assert(malloc_usable_size(p) >= 256); 1013 assert(malloc_usable_size(p) >= 256);
1012 */ 1014 */
1013 size_t dlmalloc_usable_size(void*); 1015 size_t dlmalloc_usable_size (void *);
1014 1016
1015 /* 1017 /*
1016 malloc_stats(); 1018 malloc_stats();
1017 Prints on stderr the amount of space obtained from the system (both 1019 Prints on stderr the amount of space obtained from the system (both
1018 via sbrk and mmap), the maximum amount (which may be more than 1020 via sbrk and mmap), the maximum amount (which may be more than
1029 (normally sbrk) outside of malloc. 1031 (normally sbrk) outside of malloc.
1030 1032
1031 malloc_stats prints only the most commonly interesting statistics. 1033 malloc_stats prints only the most commonly interesting statistics.
1032 More information can be obtained by calling mallinfo. 1034 More information can be obtained by calling mallinfo.
1033 */ 1035 */
1034 void dlmalloc_stats(void); 1036 void dlmalloc_stats (void);
1035 1037
1036 #endif /* ONLY_MSPACES */ 1038 #endif /* ONLY_MSPACES */
1037 1039
1038 #if MSPACES 1040 #if MSPACES
1039 1041
1040 /* 1042 /*
1041 mspace is an opaque type representing an independent 1043 mspace is an opaque type representing an independent
1042 region of space that supports mspace_malloc, etc. 1044 region of space that supports mspace_malloc, etc.
1043 */ 1045 */
1044 typedef void* mspace; 1046 typedef void *mspace;
1045 1047
1046 /* 1048 /*
1047 create_mspace creates and returns a new independent space with the 1049 create_mspace creates and returns a new independent space with the
1048 given initial capacity, or, if 0, the default granularity size. It 1050 given initial capacity, or, if 0, the default granularity size. It
1049 returns null if there is no system memory available to create the 1051 returns null if there is no system memory available to create the
1052 dynamically as needed to service mspace_malloc requests. You can 1054 dynamically as needed to service mspace_malloc requests. You can
1053 control the sizes of incremental increases of this space by 1055 control the sizes of incremental increases of this space by
1054 compiling with a different DEFAULT_GRANULARITY or dynamically 1056 compiling with a different DEFAULT_GRANULARITY or dynamically
1055 setting with mallopt(M_GRANULARITY, value). 1057 setting with mallopt(M_GRANULARITY, value).
1056 */ 1058 */
1057 mspace create_mspace(size_t capacity, int locked); 1059 mspace create_mspace (size_t capacity, int locked);
1058 1060
1059 /* 1061 /*
1060 destroy_mspace destroys the given space, and attempts to return all 1062 destroy_mspace destroys the given space, and attempts to return all
1061 of its memory back to the system, returning the total number of 1063 of its memory back to the system, returning the total number of
1062 bytes freed. After destruction, the results of access to all memory 1064 bytes freed. After destruction, the results of access to all memory
1063 used by the space become undefined. 1065 used by the space become undefined.
1064 */ 1066 */
1065 size_t destroy_mspace(mspace msp); 1067 size_t destroy_mspace (mspace msp);
1066 1068
1067 /* 1069 /*
1068 create_mspace_with_base uses the memory supplied as the initial base 1070 create_mspace_with_base uses the memory supplied as the initial base
1069 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this 1071 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1070 space is used for bookkeeping, so the capacity must be at least this 1072 space is used for bookkeeping, so the capacity must be at least this
1071 large. (Otherwise 0 is returned.) When this initial space is 1073 large. (Otherwise 0 is returned.) When this initial space is
1072 exhausted, additional memory will be obtained from the system. 1074 exhausted, additional memory will be obtained from the system.
1073 Destroying this space will deallocate all additionally allocated 1075 Destroying this space will deallocate all additionally allocated
1074 space (if possible) but not the initial base. 1076 space (if possible) but not the initial base.
1075 */ 1077 */
1076 mspace create_mspace_with_base(void* base, size_t capacity, int locked); 1078 mspace create_mspace_with_base (void *base, size_t capacity, int locked);
1077 1079
1078 /* 1080 /*
1079 mspace_malloc behaves as malloc, but operates within 1081 mspace_malloc behaves as malloc, but operates within
1080 the given space. 1082 the given space.
1081 */ 1083 */
1082 void* mspace_malloc(mspace msp, size_t bytes); 1084 void *mspace_malloc (mspace msp, size_t bytes);
1083 1085
1084 /* 1086 /*
1085 mspace_free behaves as free, but operates within 1087 mspace_free behaves as free, but operates within
1086 the given space. 1088 the given space.
1087 1089
1088 If compiled with FOOTERS==1, mspace_free is not actually needed. 1090 If compiled with FOOTERS==1, mspace_free is not actually needed.
1089 free may be called instead of mspace_free because freed chunks from 1091 free may be called instead of mspace_free because freed chunks from
1090 any space are handled by their originating spaces. 1092 any space are handled by their originating spaces.
1091 */ 1093 */
1092 void mspace_free(mspace msp, void* mem); 1094 void mspace_free (mspace msp, void *mem);
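A sketch of the mspace lifecycle using the declarations above; a capacity of 0 requests the default granularity, and the locked flag selects per-space locking.

    mspace ms = create_mspace(0, 0);            /* default initial capacity, no locking */
    if (ms != 0) {
        void *p = mspace_malloc(ms, 128);
        if (p != 0)
            mspace_free(ms, p);                 /* with FOOTERS==1 plain free() would also work */
        size_t released = destroy_mspace(ms);   /* returns the bytes handed back to the system */
        (void)released;
    }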
1093 1095
1094 /* 1096 /*
1095 mspace_realloc behaves as realloc, but operates within 1097 mspace_realloc behaves as realloc, but operates within
1096 the given space. 1098 the given space.
1097 1099
1098 If compiled with FOOTERS==1, mspace_realloc is not actually 1100 If compiled with FOOTERS==1, mspace_realloc is not actually
1099 needed. realloc may be called instead of mspace_realloc because 1101 needed. realloc may be called instead of mspace_realloc because
1100 realloced chunks from any space are handled by their originating 1102 realloced chunks from any space are handled by their originating
1101 spaces. 1103 spaces.
1102 */ 1104 */
1103 void* mspace_realloc(mspace msp, void* mem, size_t newsize); 1105 void *mspace_realloc (mspace msp, void *mem, size_t newsize);
1104 1106
1105 /* 1107 /*
1106 mspace_calloc behaves as calloc, but operates within 1108 mspace_calloc behaves as calloc, but operates within
1107 the given space. 1109 the given space.
1108 */ 1110 */
1109 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); 1111 void *mspace_calloc (mspace msp, size_t n_elements, size_t elem_size);
1110 1112
1111 /* 1113 /*
1112 mspace_memalign behaves as memalign, but operates within 1114 mspace_memalign behaves as memalign, but operates within
1113 the given space. 1115 the given space.
1114 */ 1116 */
1115 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); 1117 void *mspace_memalign (mspace msp, size_t alignment, size_t bytes);
1116 1118
1117 /* 1119 /*
1118 mspace_independent_calloc behaves as independent_calloc, but 1120 mspace_independent_calloc behaves as independent_calloc, but
1119 operates within the given space. 1121 operates within the given space.
1120 */ 1122 */
1121 void** mspace_independent_calloc(mspace msp, size_t n_elements, 1123 void **mspace_independent_calloc (mspace msp, size_t n_elements,
1122 size_t elem_size, void* chunks[]); 1124 size_t elem_size, void *chunks[]);
1123 1125
1124 /* 1126 /*
1125 mspace_independent_comalloc behaves as independent_comalloc, but 1127 mspace_independent_comalloc behaves as independent_comalloc, but
1126 operates within the given space. 1128 operates within the given space.
1127 */ 1129 */
1128 void** mspace_independent_comalloc(mspace msp, size_t n_elements, 1130 void **mspace_independent_comalloc (mspace msp, size_t n_elements,
1129 size_t sizes[], void* chunks[]); 1131 size_t sizes[], void *chunks[]);
1130 1132
1131 /* 1133 /*
1132 mspace_footprint() returns the number of bytes obtained from the 1134 mspace_footprint() returns the number of bytes obtained from the
1133 system for this space. 1135 system for this space.
1134 */ 1136 */
1135 size_t mspace_footprint(mspace msp); 1137 size_t mspace_footprint (mspace msp);
1136 1138
1137 /* 1139 /*
1138 mspace_max_footprint() returns the peak number of bytes obtained from the 1140 mspace_max_footprint() returns the peak number of bytes obtained from the
1139 system for this space. 1141 system for this space.
1140 */ 1142 */
1141 size_t mspace_max_footprint(mspace msp); 1143 size_t mspace_max_footprint (mspace msp);
1142 1144
1143 1145
1144 #if !NO_MALLINFO 1146 #if !NO_MALLINFO
1145 /* 1147 /*
1146 mspace_mallinfo behaves as mallinfo, but reports properties of 1148 mspace_mallinfo behaves as mallinfo, but reports properties of
1147 the given space. 1149 the given space.
1148 */ 1150 */
1149 struct mallinfo mspace_mallinfo(mspace msp); 1151 struct mallinfo mspace_mallinfo (mspace msp);
1150 #endif /* NO_MALLINFO */ 1152 #endif /* NO_MALLINFO */
1151 1153
1152 /* 1154 /*
1153 mspace_malloc_stats behaves as malloc_stats, but reports 1155 mspace_malloc_stats behaves as malloc_stats, but reports
1154 properties of the given space. 1156 properties of the given space.
1155 */ 1157 */
1156 void mspace_malloc_stats(mspace msp); 1158 void mspace_malloc_stats (mspace msp);
1157 1159
1158 /* 1160 /*
1159 mspace_trim behaves as malloc_trim, but 1161 mspace_trim behaves as malloc_trim, but
1160 operates within the given space. 1162 operates within the given space.
1161 */ 1163 */
1162 int mspace_trim(mspace msp, size_t pad); 1164 int mspace_trim (mspace msp, size_t pad);
1163 1165
1164 /* 1166 /*
1165 An alias for mallopt. 1167 An alias for mallopt.
1166 */ 1168 */
1167 int mspace_mallopt(int, int); 1169 int mspace_mallopt (int, int);
1168 1170
1169 #endif /* MSPACES */ 1171 #endif /* MSPACES */
1170 1172
1171 #ifdef __cplusplus 1173 #ifdef __cplusplus
1172 }; /* end of extern "C" */ 1174 }; /* end of extern "C" */
1173 #endif /* __cplusplus */ 1175 #endif /* __cplusplus */
1174 1176
1175 /* 1177 /*
1176 ======================================================================== 1178 ========================================================================
1177 To make a fully customizable malloc.h header file, cut everything 1179 To make a fully customizable malloc.h header file, cut everything
1183 /* #include "malloc.h" */ 1185 /* #include "malloc.h" */
1184 1186
1185 /*------------------------------ internal #includes ---------------------- */ 1187 /*------------------------------ internal #includes ---------------------- */
1186 1188
1187 #ifdef _MSC_VER 1189 #ifdef _MSC_VER
1188 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */ 1190 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1189 #endif /* _MSC_VER */ 1191 #endif /* _MSC_VER */
1190 1192
1191 #ifndef LACKS_STDIO_H 1193 #ifndef LACKS_STDIO_H
1192 #include <stdio.h> /* for printing in malloc_stats */ 1194 #include <stdio.h> /* for printing in malloc_stats */
1193 #endif 1195 #endif
1194 1196
1195 #ifndef LACKS_ERRNO_H 1197 #ifndef LACKS_ERRNO_H
1196 #include <errno.h> /* for MALLOC_FAILURE_ACTION */ 1198 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
1197 #endif /* LACKS_ERRNO_H */ 1199 #endif /* LACKS_ERRNO_H */
1198 #if FOOTERS 1200 #if FOOTERS
1199 #include <time.h> /* for magic initialization */ 1201 #include <time.h> /* for magic initialization */
1200 #endif /* FOOTERS */ 1202 #endif /* FOOTERS */
1201 #ifndef LACKS_STDLIB_H 1203 #ifndef LACKS_STDLIB_H
1202 #include <stdlib.h> /* for abort() */ 1204 #include <stdlib.h> /* for abort() */
1203 #endif /* LACKS_STDLIB_H */ 1205 #endif /* LACKS_STDLIB_H */
1204 #ifdef DEBUG 1206 #ifdef DEBUG
1205 #if ABORT_ON_ASSERT_FAILURE 1207 #if ABORT_ON_ASSERT_FAILURE
1206 #define assert(x) if(!(x)) ABORT 1208 #define assert(x) if(!(x)) ABORT
1207 #else /* ABORT_ON_ASSERT_FAILURE */ 1209 #else /* ABORT_ON_ASSERT_FAILURE */
1208 #include <assert.h> 1210 #include <assert.h>
1209 #endif /* ABORT_ON_ASSERT_FAILURE */ 1211 #endif /* ABORT_ON_ASSERT_FAILURE */
1210 #else /* DEBUG */ 1212 #else /* DEBUG */
1211 #define assert(x) 1213 #define assert(x)
1212 #endif /* DEBUG */ 1214 #endif /* DEBUG */
1213 #ifndef LACKS_STRING_H 1215 #ifndef LACKS_STRING_H
1214 #include <string.h> /* for memset etc */ 1216 #include <string.h> /* for memset etc */
1215 #endif /* LACKS_STRING_H */ 1217 #endif /* LACKS_STRING_H */
1216 #if USE_BUILTIN_FFS 1218 #if USE_BUILTIN_FFS
1217 #ifndef LACKS_STRINGS_H 1219 #ifndef LACKS_STRINGS_H
1218 #include <strings.h> /* for ffs */ 1220 #include <strings.h> /* for ffs */
1219 #endif /* LACKS_STRINGS_H */ 1221 #endif /* LACKS_STRINGS_H */
1220 #endif /* USE_BUILTIN_FFS */ 1222 #endif /* USE_BUILTIN_FFS */
1221 #if HAVE_MMAP 1223 #if HAVE_MMAP
1222 #ifndef LACKS_SYS_MMAN_H 1224 #ifndef LACKS_SYS_MMAN_H
1223 #include <sys/mman.h> /* for mmap */ 1225 #include <sys/mman.h> /* for mmap */
1224 #endif /* LACKS_SYS_MMAN_H */ 1226 #endif /* LACKS_SYS_MMAN_H */
1225 #ifndef LACKS_FCNTL_H 1227 #ifndef LACKS_FCNTL_H
1226 #include <fcntl.h> 1228 #include <fcntl.h>
1227 #endif /* LACKS_FCNTL_H */ 1229 #endif /* LACKS_FCNTL_H */
1228 #endif /* HAVE_MMAP */ 1230 #endif /* HAVE_MMAP */
1229 #if HAVE_MORECORE 1231 #if HAVE_MORECORE
1230 #ifndef LACKS_UNISTD_H 1232 #ifndef LACKS_UNISTD_H
1231 #include <unistd.h> /* for sbrk */ 1233 #include <unistd.h> /* for sbrk */
1232 #else /* LACKS_UNISTD_H */ 1234 #else /* LACKS_UNISTD_H */
1233 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) 1235 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1234 extern void* sbrk(ptrdiff_t); 1236 extern void *sbrk (ptrdiff_t);
1235 #endif /* FreeBSD etc */ 1237 #endif /* FreeBSD etc */
1236 #endif /* LACKS_UNISTD_H */ 1238 #endif /* LACKS_UNISTD_H */
1237 #endif /* HAVE_MORECORE */ 1239 #endif /* HAVE_MORECORE */
1238 1240
1239 #ifndef WIN32 1241 #ifndef WIN32
1240 #ifndef malloc_getpagesize 1242 #ifndef malloc_getpagesize
1241 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ 1243 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1242 # ifndef _SC_PAGE_SIZE 1244 # ifndef _SC_PAGE_SIZE
1243 # define _SC_PAGE_SIZE _SC_PAGESIZE 1245 # define _SC_PAGE_SIZE _SC_PAGESIZE
1244 # endif 1246 # endif
1245 # endif 1247 # endif
1246 # ifdef _SC_PAGE_SIZE 1248 # ifdef _SC_PAGE_SIZE
1247 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) 1249 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1248 # else 1250 # else
1249 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) 1251 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1250 extern size_t getpagesize(); 1252 extern size_t getpagesize ();
1251 # define malloc_getpagesize getpagesize() 1253 # define malloc_getpagesize getpagesize()
1252 # else 1254 # else
1253 # ifdef WIN32 /* use supplied emulation of getpagesize */ 1255 # ifdef WIN32 /* use supplied emulation of getpagesize */
1254 # define malloc_getpagesize getpagesize() 1256 # define malloc_getpagesize getpagesize()
1255 # else 1257 # else
1256 # ifndef LACKS_SYS_PARAM_H 1258 # ifndef LACKS_SYS_PARAM_H
1257 # include <sys/param.h> 1259 # include <sys/param.h>
1258 # endif 1260 # endif
1319 */ 1321 */
1320 1322
1321 1323
1322 /* MORECORE and MMAP must return MFAIL on failure */ 1324 /* MORECORE and MMAP must return MFAIL on failure */
1323 #define MFAIL ((void*)(MAX_SIZE_T)) 1325 #define MFAIL ((void*)(MAX_SIZE_T))
1324 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ 1326 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1325 1327
1326 #if !HAVE_MMAP 1328 #if !HAVE_MMAP
1327 #define IS_MMAPPED_BIT (SIZE_T_ZERO) 1329 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
1328 #define USE_MMAP_BIT (SIZE_T_ZERO) 1330 #define USE_MMAP_BIT (SIZE_T_ZERO)
1329 #define CALL_MMAP(s) MFAIL 1331 #define CALL_MMAP(s) MFAIL
1347 /* 1349 /*
1348 Nearly all versions of mmap support MAP_ANONYMOUS, so the following 1350 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1349 is unlikely to be needed, but is supplied just in case. 1351 is unlikely to be needed, but is supplied just in case.
1350 */ 1352 */
1351 #define MMAP_FLAGS (MAP_PRIVATE) 1353 #define MMAP_FLAGS (MAP_PRIVATE)
1352 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ 1354 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1353 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \ 1355 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1354 (dev_zero_fd = open("/dev/zero", O_RDWR), \ 1356 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1355 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ 1357 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1356 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) 1358 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1357 #endif /* MAP_ANONYMOUS */ 1359 #endif /* MAP_ANONYMOUS */
1358 1360
1359 #define DIRECT_MMAP(s) CALL_MMAP(s) 1361 #define DIRECT_MMAP(s) CALL_MMAP(s)
1360 #else /* WIN32 */ 1362 #else /* WIN32 */
1361 1363
1362 /* Win32 MMAP via VirtualAlloc */ 1364 /* Win32 MMAP via VirtualAlloc */
1363 static void* win32mmap(size_t size) { 1365 static void *
1364 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); 1366 win32mmap (size_t size)
1365 return (ptr != 0)? ptr: MFAIL; 1367 {
1368 void *ptr =
1369 VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1370 return (ptr != 0) ? ptr : MFAIL;
1366 } 1371 }
1367 1372
1368 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ 1373 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1369 static void* win32direct_mmap(size_t size) { 1374 static void *
1370 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, 1375 win32direct_mmap (size_t size)
1371 PAGE_READWRITE); 1376 {
1372 return (ptr != 0)? ptr: MFAIL; 1377 void *ptr =
1378 VirtualAlloc (0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1379 PAGE_READWRITE);
1380 return (ptr != 0) ? ptr : MFAIL;
1373 } 1381 }
1374 1382
1375 /* This function supports releasing coalesced segments */ 1383 /* This function supports releasing coalesced segments */
1376 static int win32munmap(void* ptr, size_t size) { 1384 static int
1377 MEMORY_BASIC_INFORMATION minfo; 1385 win32munmap (void *ptr, size_t size)
1378 char* cptr = ptr; 1386 {
1379 while (size) { 1387 MEMORY_BASIC_INFORMATION minfo;
1380 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) 1388 char *cptr = ptr;
1381 return -1; 1389 while (size) {
1382 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || 1390 if (VirtualQuery (cptr, &minfo, sizeof (minfo)) == 0)
1383 minfo.State != MEM_COMMIT || minfo.RegionSize > size) 1391 return -1;
1384 return -1; 1392 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1385 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) 1393 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1386 return -1; 1394 return -1;
1387 cptr += minfo.RegionSize; 1395 if (VirtualFree (cptr, 0, MEM_RELEASE) == 0)
1388 size -= minfo.RegionSize; 1396 return -1;
1389 } 1397 cptr += minfo.RegionSize;
1390 return 0; 1398 size -= minfo.RegionSize;
1399 }
1400 return 0;
1391 } 1401 }
1392 1402
1393 #define CALL_MMAP(s) win32mmap(s) 1403 #define CALL_MMAP(s) win32mmap(s)
1394 #define CALL_MUNMAP(a, s) win32munmap((a), (s)) 1404 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1395 #define DIRECT_MMAP(s) win32direct_mmap(s) 1405 #define DIRECT_MMAP(s) win32direct_mmap(s)
1396 #endif /* WIN32 */ 1406 #endif /* WIN32 */
1397 #endif /* HAVE_MMAP */ 1407 #endif /* HAVE_MMAP */
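From here on these macros are the allocator's only interface to system memory; both the Unix and the Win32 branches signal failure with MFAIL, so the calling code stays platform independent. A hedged sketch of the call pattern (the real call sites are in parts of the file not shown here):

    size_t len = (size_t)64 * 1024;             /* any granularity-sized request */
    char  *mem = (char *)CALL_MMAP(len);
    if (mem != CMFAIL) {                        /* CMFAIL is the char* view of MFAIL */
        /* ... carve chunks out of [mem, mem + len) ... */
        CALL_MUNMAP(mem, len);
    }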
1398 1408
1399 #if HAVE_MMAP && HAVE_MREMAP 1409 #if HAVE_MMAP && HAVE_MREMAP
1400 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) 1410 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1401 #else /* HAVE_MMAP && HAVE_MREMAP */ 1411 #else /* HAVE_MMAP && HAVE_MREMAP */
1402 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL 1412 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1403 #endif /* HAVE_MMAP && HAVE_MREMAP */ 1413 #endif /* HAVE_MMAP && HAVE_MREMAP */
1404 1414
1405 #if HAVE_MORECORE 1415 #if HAVE_MORECORE
1406 #define CALL_MORECORE(S) MORECORE(S) 1416 #define CALL_MORECORE(S) MORECORE(S)
1407 #else /* HAVE_MORECORE */ 1417 #else /* HAVE_MORECORE */
1408 #define CALL_MORECORE(S) MFAIL 1418 #define CALL_MORECORE(S) MFAIL
1409 #endif /* HAVE_MORECORE */ 1419 #endif /* HAVE_MORECORE */
1410 1420
1411 /* mstate bit set if contiguous morecore disabled or failed */ 1421 /* mstate bit set if contiguous morecore disabled or failed */
1412 #define USE_NONCONTIGUOUS_BIT (4U) 1422 #define USE_NONCONTIGUOUS_BIT (4U)
1452 Because lock-protected regions have bounded times, and there 1462 Because lock-protected regions have bounded times, and there
1453 are no recursive lock calls, we can use simple spinlocks. 1463 are no recursive lock calls, we can use simple spinlocks.
1454 */ 1464 */
1455 1465
1456 #define MLOCK_T long 1466 #define MLOCK_T long
1457 static int win32_acquire_lock (MLOCK_T *sl) { 1467 static int
1458 for (;;) { 1468 win32_acquire_lock (MLOCK_T * sl)
1469 {
1470 for (;;) {
1459 #ifdef InterlockedCompareExchangePointer 1471 #ifdef InterlockedCompareExchangePointer
1460 if (!InterlockedCompareExchange(sl, 1, 0)) 1472 if (!InterlockedCompareExchange (sl, 1, 0))
1461 return 0; 1473 return 0;
1462 #else /* Use older void* version */ 1474 #else /* Use older void* version */
1463 if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0)) 1475 if (!InterlockedCompareExchange
1464 return 0; 1476 ((void **) sl, (void *) 1, (void *) 0))
1477 return 0;
1465 #endif /* InterlockedCompareExchangePointer */ 1478 #endif /* InterlockedCompareExchangePointer */
1466 Sleep (0); 1479 Sleep (0);
1467 } 1480 }
1468 } 1481 }
1469 1482
1470 static void win32_release_lock (MLOCK_T *sl) { 1483 static void
1471 InterlockedExchange (sl, 0); 1484 win32_release_lock (MLOCK_T * sl)
1485 {
1486 InterlockedExchange (sl, 0);
1472 } 1487 }
1473 1488
1474 #define INITIAL_LOCK(l) *(l)=0 1489 #define INITIAL_LOCK(l) *(l)=0
1475 #define ACQUIRE_LOCK(l) win32_acquire_lock(l) 1490 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1476 #define RELEASE_LOCK(l) win32_release_lock(l) 1491 #define RELEASE_LOCK(l) win32_release_lock(l)
1479 #endif /* HAVE_MORECORE */ 1494 #endif /* HAVE_MORECORE */
1480 static MLOCK_T magic_init_mutex; 1495 static MLOCK_T magic_init_mutex;
1481 #endif /* WIN32 */ 1496 #endif /* WIN32 */
1482 1497
1483 #define USE_LOCK_BIT (2U) 1498 #define USE_LOCK_BIT (2U)
1484 #else /* USE_LOCKS */ 1499 #else /* USE_LOCKS */
1485 #define USE_LOCK_BIT (0U) 1500 #define USE_LOCK_BIT (0U)
1486 #define INITIAL_LOCK(l) 1501 #define INITIAL_LOCK(l)
1487 #endif /* USE_LOCKS */ 1502 #endif /* USE_LOCKS */
1488 1503
1489 #if USE_LOCKS && HAVE_MORECORE 1504 #if USE_LOCKS && HAVE_MORECORE
1495 #endif /* USE_LOCKS && HAVE_MORECORE */ 1510 #endif /* USE_LOCKS && HAVE_MORECORE */
1496 1511
1497 #if USE_LOCKS 1512 #if USE_LOCKS
1498 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); 1513 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1499 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); 1514 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1500 #else /* USE_LOCKS */ 1515 #else /* USE_LOCKS */
1501 #define ACQUIRE_MAGIC_INIT_LOCK() 1516 #define ACQUIRE_MAGIC_INIT_LOCK()
1502 #define RELEASE_MAGIC_INIT_LOCK() 1517 #define RELEASE_MAGIC_INIT_LOCK()
1503 #endif /* USE_LOCKS */ 1518 #endif /* USE_LOCKS */
1504 1519
1505 1520
1638 chunk is trailed by the first two fields of a fake next-chunk 1653 chunk is trailed by the first two fields of a fake next-chunk
1639 for sake of usage checks. 1654 for sake of usage checks.
1640 1655
1641 */ 1656 */
1642 1657
1643 struct malloc_chunk { 1658 struct malloc_chunk
1644 size_t prev_foot; /* Size of previous chunk (if free). */ 1659 {
1645 size_t head; /* Size and inuse bits. */ 1660 size_t prev_foot; /* Size of previous chunk (if free). */
1646 struct malloc_chunk* fd; /* double links -- used only if free. */ 1661 size_t head; /* Size and inuse bits. */
1647 struct malloc_chunk* bk; 1662 struct malloc_chunk *fd; /* double links -- used only if free. */
1663 struct malloc_chunk *bk;
1648 }; 1664 };
1649 1665
1650 typedef struct malloc_chunk mchunk; 1666 typedef struct malloc_chunk mchunk;
1651 typedef struct malloc_chunk* mchunkptr; 1667 typedef struct malloc_chunk *mchunkptr;
1652 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ 1668 typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1653 typedef size_t bindex_t; /* Described below */ 1669 typedef size_t bindex_t; /* Described below */
1654 typedef unsigned int binmap_t; /* Described below */ 1670 typedef unsigned int binmap_t; /* Described below */
1655 typedef unsigned int flag_t; /* The type of various bit flag sets */ 1671 typedef unsigned int flag_t; /* The type of various bit flag sets */
1656 1672
1657 /* ------------------- Chunks sizes and alignments ----------------------- */ 1673 /* ------------------- Chunks sizes and alignments ----------------------- */
1658 1674
1659 #define MCHUNK_SIZE (sizeof(mchunk)) 1675 #define MCHUNK_SIZE (sizeof(mchunk))
1660 1676
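Given the malloc_chunk layout above, the user-visible pointer sits just past the prev_foot/head pair, and the fd/bk links reuse the start of the payload while the chunk is free. A hedged sketch of the conversion; the allocator's real chunk2mem/mem2chunk macros live in a portion of the file not shown here.

    /* Illustrative only: payload begins after the two size_t header fields. */
    #define EX_CHUNK2MEM(p)  ((void *)((char *)(p) + 2 * sizeof(size_t)))
    #define EX_MEM2CHUNK(m)  ((mchunkptr)((char *)(m) - 2 * sizeof(size_t)))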
1843 bins. Under current bin calculations, this ranges from 6 up to 21 1859 bins. Under current bin calculations, this ranges from 6 up to 21
1844 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case 1860 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1845 is of course much better. 1861 is of course much better.
1846 */ 1862 */
1847 1863
1848 struct malloc_tree_chunk { 1864 struct malloc_tree_chunk
1849 /* The first four fields must be compatible with malloc_chunk */ 1865 {
1850 size_t prev_foot; 1866 /* The first four fields must be compatible with malloc_chunk */
1851 size_t head; 1867 size_t prev_foot;
1852 struct malloc_tree_chunk* fd; 1868 size_t head;
1853 struct malloc_tree_chunk* bk; 1869 struct malloc_tree_chunk *fd;
1854 1870 struct malloc_tree_chunk *bk;
1855 struct malloc_tree_chunk* child[2]; 1871
1856 struct malloc_tree_chunk* parent; 1872 struct malloc_tree_chunk *child[2];
1857 bindex_t index; 1873 struct malloc_tree_chunk *parent;
1874 bindex_t index;
1858 }; 1875 };
1859 1876
1860 typedef struct malloc_tree_chunk tchunk; 1877 typedef struct malloc_tree_chunk tchunk;
1861 typedef struct malloc_tree_chunk* tchunkptr; 1878 typedef struct malloc_tree_chunk *tchunkptr;
1862 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ 1879 typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1863 1880
1864 /* A little helper macro for trees */ 1881 /* A little helper macro for trees */
1865 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) 1882 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
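A sketch of how leftmost_child is typically used: walking down a tree bin until no child remains. This is an illustration of the macro, not a copy of the allocator's own traversal code.

    static tchunkptr example_leftmost_leaf(tchunkptr t)
    {
        while (leftmost_child(t) != 0)          /* prefer child[0], fall back to child[1] */
            t = leftmost_child(t);
        return t;
    }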
1866 1883
1867 /* ----------------------------- Segments -------------------------------- */ 1884 /* ----------------------------- Segments -------------------------------- */
1919 * If neither bit is set, then the segment was obtained using 1936 * If neither bit is set, then the segment was obtained using
1920 MORECORE so can be merged with surrounding MORECORE'd segments 1937 MORECORE so can be merged with surrounding MORECORE'd segments
1921 and deallocated/trimmed using MORECORE with negative arguments. 1938 and deallocated/trimmed using MORECORE with negative arguments.
1922 */ 1939 */
1923 1940
1924 struct malloc_segment { 1941 struct malloc_segment
1925 char* base; /* base address */ 1942 {
1926 size_t size; /* allocated size */ 1943 char *base; /* base address */
1927 struct malloc_segment* next; /* ptr to next segment */ 1944 size_t size; /* allocated size */
1928 flag_t sflags; /* mmap and extern flag */ 1945 struct malloc_segment *next; /* ptr to next segment */
1946 flag_t sflags; /* mmap and extern flag */
1929 }; 1947 };
1930 1948
1931 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT) 1949 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
1932 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) 1950 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1933 1951
1934 typedef struct malloc_segment msegment; 1952 typedef struct malloc_segment msegment;
1935 typedef struct malloc_segment* msegmentptr; 1953 typedef struct malloc_segment *msegmentptr;
1936 1954
1937 /* ---------------------------- malloc_state ----------------------------- */ 1955 /* ---------------------------- malloc_state ----------------------------- */
1938 1956
1939 /* 1957 /*
1940 A malloc_state holds all of the bookkeeping for a space. 1958 A malloc_state holds all of the bookkeeping for a space.
2017 #define TREEBIN_SHIFT (8U) 2035 #define TREEBIN_SHIFT (8U)
2018 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) 2036 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2019 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) 2037 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2020 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) 2038 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
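Working the constants above through: with TREEBIN_SHIFT of 8, requests below 256 bytes are handled by the smallbins and everything else by the treebins. A hedged arithmetic sketch; the CHUNK_* values assumed here are typical for a 32-bit build without FOOTERS.

    /* MIN_LARGE_SIZE    = 1 << 8  = 256 bytes                                  */
    /* MAX_SMALL_SIZE    = 256 - 1 = 255 bytes                                  */
    /* MAX_SMALL_REQUEST = 255 - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD              */
    /*                   = 255 - 7 - 4 = 244 bytes on the assumed configuration */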
2021 2039
2022 struct malloc_state { 2040 struct malloc_state
2023 binmap_t smallmap; 2041 {
2024 binmap_t treemap; 2042 binmap_t smallmap;
2025 size_t dvsize; 2043 binmap_t treemap;
2026 size_t topsize; 2044 size_t dvsize;
2027 char* least_addr; 2045 size_t topsize;
2028 mchunkptr dv; 2046 char *least_addr;
2029 mchunkptr top; 2047 mchunkptr dv;
2030 size_t trim_check; 2048 mchunkptr top;
2031 size_t magic; 2049 size_t trim_check;
2032 mchunkptr smallbins[(NSMALLBINS+1)*2]; 2050 size_t magic;
2033 tbinptr treebins[NTREEBINS]; 2051 mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2034 size_t footprint; 2052 tbinptr treebins[NTREEBINS];
2035 size_t max_footprint; 2053 size_t footprint;
2036 flag_t mflags; 2054 size_t max_footprint;
2055 flag_t mflags;
2037 #if USE_LOCKS 2056 #if USE_LOCKS
2038 MLOCK_T mutex; /* locate lock among fields that rarely change */ 2057 MLOCK_T mutex; /* locate lock among fields that rarely change */
2039 #endif /* USE_LOCKS */ 2058 #endif /* USE_LOCKS */
2040 msegment seg; 2059 msegment seg;
2041 }; 2060 };
2042 2061
2043 typedef struct malloc_state* mstate; 2062 typedef struct malloc_state *mstate;
2044 2063
2045 /* ------------- Global malloc_state and malloc_params ------------------- */ 2064 /* ------------- Global malloc_state and malloc_params ------------------- */
2046 2065
2047 /* 2066 /*
2048 malloc_params holds global properties, including those that can be 2067 malloc_params holds global properties, including those that can be
2049 dynamically set using mallopt. There is a single instance, mparams, 2068 dynamically set using mallopt. There is a single instance, mparams,
2050 initialized in init_mparams. 2069 initialized in init_mparams.
2051 */ 2070 */
2052 2071
2053 struct malloc_params { 2072 struct malloc_params
2054 size_t magic; 2073 {
2055 size_t page_size; 2074 size_t magic;
2056 size_t granularity; 2075 size_t page_size;
2057 size_t mmap_threshold; 2076 size_t granularity;
2058 size_t trim_threshold; 2077 size_t mmap_threshold;
2059 flag_t default_mflags; 2078 size_t trim_threshold;
2079 flag_t default_mflags;
2060 }; 2080 };
2061 2081
2062 static struct malloc_params mparams; 2082 static struct malloc_params mparams;
2063 2083
2064 /* The global malloc_state used for all non-"mspace" calls */ 2084 /* The global malloc_state used for all non-"mspace" calls */
2103 /* True if segment S holds address A */ 2123 /* True if segment S holds address A */
2104 #define segment_holds(S, A)\ 2124 #define segment_holds(S, A)\
2105 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) 2125 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2106 2126
2107 /* Return segment holding given address */ 2127 /* Return segment holding given address */
2108 static msegmentptr segment_holding(mstate m, char* addr) { 2128 static msegmentptr
2109 msegmentptr sp = &m->seg; 2129 segment_holding (mstate m, char *addr)
2110 for (;;) { 2130 {
2111 if (addr >= sp->base && addr < sp->base + sp->size) 2131 msegmentptr sp = &m->seg;
2112 return sp; 2132 for (;;) {
2113 if ((sp = sp->next) == 0) 2133 if (addr >= sp->base && addr < sp->base + sp->size)
2114 return 0; 2134 return sp;
2115 } 2135 if ((sp = sp->next) == 0)
2136 return 0;
2137 }
2116 } 2138 }
2117 2139
2118 /* Return true if segment contains a segment link */ 2140 /* Return true if segment contains a segment link */
2119 static int has_segment_link(mstate m, msegmentptr ss) { 2141 static int
2120 msegmentptr sp = &m->seg; 2142 has_segment_link (mstate m, msegmentptr ss)
2121 for (;;) { 2143 {
2122 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) 2144 msegmentptr sp = &m->seg;
2123 return 1; 2145 for (;;) {
2124 if ((sp = sp->next) == 0) 2146 if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2125 return 0; 2147 return 1;
2126 } 2148 if ((sp = sp->next) == 0)
2149 return 0;
2150 }
2127 } 2151 }
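A sketch of walking the same segment list from scratch, summing the mapped sizes; it mirrors the loop shape of segment_holding above and assumes access to the malloc_state, which normally only internal code has.

    static size_t example_total_segment_bytes(mstate m)
    {
        size_t total = 0;
        msegmentptr sp = &m->seg;               /* the state embeds the head of the list */
        while (sp != 0) {
            total += sp->size;
            sp = sp->next;
        }
        return total;
    }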
2128 2152
2129 #ifndef MORECORE_CANNOT_TRIM 2153 #ifndef MORECORE_CANNOT_TRIM
2130 #define should_trim(M,s) ((s) > (M)->trim_check) 2154 #define should_trim(M,s) ((s) > (M)->trim_check)
2131 #else /* MORECORE_CANNOT_TRIM */ 2155 #else /* MORECORE_CANNOT_TRIM */
2132 #define should_trim(M,s) (0) 2156 #define should_trim(M,s) (0)
2133 #endif /* MORECORE_CANNOT_TRIM */ 2157 #endif /* MORECORE_CANNOT_TRIM */
2134 2158
2135 /* 2159 /*
2136 TOP_FOOT_SIZE is padding at the end of a segment, including space 2160 TOP_FOOT_SIZE is padding at the end of a segment, including space
2158 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } 2182 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2159 #else /* USE_LOCKS */ 2183 #else /* USE_LOCKS */
2160 2184
2161 #ifndef PREACTION 2185 #ifndef PREACTION
2162 #define PREACTION(M) (0) 2186 #define PREACTION(M) (0)
2163 #endif /* PREACTION */ 2187 #endif /* PREACTION */
2164 2188
2165 #ifndef POSTACTION 2189 #ifndef POSTACTION
2166 #define POSTACTION(M) 2190 #define POSTACTION(M)
2167 #endif /* POSTACTION */ 2191 #endif /* POSTACTION */
2168 2192
2169 #endif /* USE_LOCKS */ 2193 #endif /* USE_LOCKS */
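Every public entry point brackets its work with this pair; with USE_LOCKS they take and release the state's mutex, otherwise they compile to nothing. A hedged sketch of the pattern (the real entry points appear later in the file):

    void *example_entry(mstate m, size_t bytes)
    {
        void *mem = 0;
        if (!PREACTION(m)) {                    /* nonzero would mean the lock was not taken */
            /* ... allocation work on m, using bytes ... */
            POSTACTION(m);                      /* releases the lock when USE_LOCKS is set */
        }
        return mem;
    }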
2170 2194
2171 /* 2195 /*
2172 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. 2196 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2180 2204
2181 /* A count of the number of corruption errors causing resets */ 2205 /* A count of the number of corruption errors causing resets */
2182 int malloc_corruption_error_count; 2206 int malloc_corruption_error_count;
2183 2207
2184 /* default corruption action */ 2208 /* default corruption action */
2185 static void reset_on_error(mstate m); 2209 static void reset_on_error (mstate m);
2186 2210
2187 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) 2211 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2188 #define USAGE_ERROR_ACTION(m, p) 2212 #define USAGE_ERROR_ACTION(m, p)
2189 2213
2190 #else /* PROCEED_ON_ERROR */ 2214 #else /* PROCEED_ON_ERROR */
2216 #define check_top_chunk(M,P) do_check_top_chunk(M,P) 2240 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2217 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) 2241 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2218 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) 2242 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2219 #define check_malloc_state(M) do_check_malloc_state(M) 2243 #define check_malloc_state(M) do_check_malloc_state(M)
2220 2244
2221 static void do_check_any_chunk(mstate m, mchunkptr p); 2245 static void do_check_any_chunk (mstate m, mchunkptr p);
2222 static void do_check_top_chunk(mstate m, mchunkptr p); 2246 static void do_check_top_chunk (mstate m, mchunkptr p);
2223 static void do_check_mmapped_chunk(mstate m, mchunkptr p); 2247 static void do_check_mmapped_chunk (mstate m, mchunkptr p);
2224 static void do_check_inuse_chunk(mstate m, mchunkptr p); 2248 static void do_check_inuse_chunk (mstate m, mchunkptr p);
2225 static void do_check_free_chunk(mstate m, mchunkptr p); 2249 static void do_check_free_chunk (mstate m, mchunkptr p);
2226 static void do_check_malloced_chunk(mstate m, void* mem, size_t s); 2250 static void do_check_malloced_chunk (mstate m, void *mem, size_t s);
2227 static void do_check_tree(mstate m, tchunkptr t); 2251 static void do_check_tree (mstate m, tchunkptr t);
2228 static void do_check_treebin(mstate m, bindex_t i); 2252 static void do_check_treebin (mstate m, bindex_t i);
2229 static void do_check_smallbin(mstate m, bindex_t i); 2253 static void do_check_smallbin (mstate m, bindex_t i);
2230 static void do_check_malloc_state(mstate m); 2254 static void do_check_malloc_state (mstate m);
2231 static int bin_find(mstate m, mchunkptr x); 2255 static int bin_find (mstate m, mchunkptr x);
2232 static size_t traverse_and_check(mstate m); 2256 static size_t traverse_and_check (mstate m);
2233 #endif /* DEBUG */ 2257 #endif /* DEBUG */
2234 2258
2235 /* ---------------------------- Indexing Bins ---------------------------- */ 2259 /* ---------------------------- Indexing Bins ---------------------------- */
2236 2260
2237 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) 2261 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
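/* Worked illustration (standalone, not allocator code), assuming the
   default SMALLBIN_SHIFT == 3 and NSMALLBINS == 32 defined elsewhere in
   this file: chunk sizes below 32 << 3 == 256 bytes are "small" and
   their smallbin index is simply size >> 3. */
#include <stdio.h>
int
main (void)
{
    unsigned long sizes[] = { 16, 248, 256, 4096 };
    int i;
    for (i = 0; i < 4; ++i) {
        unsigned long s = sizes[i];
        if ((s >> 3) < 32)
            printf ("size %4lu -> smallbin %lu\n", s, s >> 3);
        else
            printf ("size %4lu -> treebin\n", s);
    }
    return 0;
}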
2392 #endif /* !INSECURE */ 2416 #endif /* !INSECURE */
2393 2417
2394 #if (FOOTERS && !INSECURE) 2418 #if (FOOTERS && !INSECURE)
2395 /* Check if (alleged) mstate m has expected magic field */ 2419 /* Check if (alleged) mstate m has expected magic field */
2396 #define ok_magic(M) ((M)->magic == mparams.magic) 2420 #define ok_magic(M) ((M)->magic == mparams.magic)
2397 #else /* (FOOTERS && !INSECURE) */ 2421 #else /* (FOOTERS && !INSECURE) */
2398 #define ok_magic(M) (1) 2422 #define ok_magic(M) (1)
2399 #endif /* (FOOTERS && !INSECURE) */ 2423 #endif /* (FOOTERS && !INSECURE) */
2400 2424
2401 2425
2402 /* In gcc, use __builtin_expect to minimize impact of checks */ 2426 /* In gcc, use __builtin_expect to minimize impact of checks */
2457 #endif /* !FOOTERS */ 2481 #endif /* !FOOTERS */
2458 2482
2459 /* ---------------------------- setting mparams -------------------------- */ 2483 /* ---------------------------- setting mparams -------------------------- */
2460 2484
2461 /* Initialize mparams */ 2485 /* Initialize mparams */
2462 static int init_mparams(void) { 2486 static int
2463 if (mparams.page_size == 0) { 2487 init_mparams (void)
2464 size_t s; 2488 {
2465 2489 if (mparams.page_size == 0) {
2466 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; 2490 size_t s;
2467 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; 2491
2492 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2493 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2468 #if MORECORE_CONTIGUOUS 2494 #if MORECORE_CONTIGUOUS
2469 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; 2495 mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
2470 #else /* MORECORE_CONTIGUOUS */ 2496 #else /* MORECORE_CONTIGUOUS */
2471 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; 2497 mparams.default_mflags =
2498 USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
2472 #endif /* MORECORE_CONTIGUOUS */ 2499 #endif /* MORECORE_CONTIGUOUS */
2473 2500
2474 #if (FOOTERS && !INSECURE) 2501 #if (FOOTERS && !INSECURE)
2475 { 2502 {
2476 #if USE_DEV_RANDOM 2503 #if USE_DEV_RANDOM
2477 int fd; 2504 int fd;
2478 unsigned char buf[sizeof(size_t)]; 2505 unsigned char buf[sizeof (size_t)];
2479 /* Try to use /dev/urandom, else fall back on using time */ 2506 /* Try to use /dev/urandom, else fall back on using time */
2480 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && 2507 if ((fd = open ("/dev/urandom", O_RDONLY)) >= 0 &&
2481 read(fd, buf, sizeof(buf)) == sizeof(buf)) { 2508 read (fd, buf, sizeof (buf)) == sizeof (buf)) {
2482 s = *((size_t *) buf); 2509 s = *((size_t *) buf);
2483 close(fd); 2510 close (fd);
2484 } 2511 } else
2485 else
2486 #endif /* USE_DEV_RANDOM */ 2512 #endif /* USE_DEV_RANDOM */
2487 s = (size_t)(time(0) ^ (size_t)0x55555555U); 2513 s = (size_t) (time (0) ^ (size_t) 0x55555555U);
2488 2514
2489 s |= (size_t)8U; /* ensure nonzero */ 2515 s |= (size_t) 8U; /* ensure nonzero */
2490 s &= ~(size_t)7U; /* improve chances of fault for bad values */ 2516 s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2491 2517
2492 } 2518 }
2493 #else /* (FOOTERS && !INSECURE) */ 2519 #else /* (FOOTERS && !INSECURE) */
2494 s = (size_t)0x58585858U; 2520 s = (size_t) 0x58585858U;
2495 #endif /* (FOOTERS && !INSECURE) */ 2521 #endif /* (FOOTERS && !INSECURE) */
2496 ACQUIRE_MAGIC_INIT_LOCK(); 2522 ACQUIRE_MAGIC_INIT_LOCK ();
2497 if (mparams.magic == 0) { 2523 if (mparams.magic == 0) {
2498 mparams.magic = s; 2524 mparams.magic = s;
2499 /* Set up lock for main malloc area */ 2525 /* Set up lock for main malloc area */
2500 INITIAL_LOCK(&gm->mutex); 2526 INITIAL_LOCK (&gm->mutex);
2501 gm->mflags = mparams.default_mflags; 2527 gm->mflags = mparams.default_mflags;
2502 } 2528 }
2503 RELEASE_MAGIC_INIT_LOCK(); 2529 RELEASE_MAGIC_INIT_LOCK ();
2504 2530
2505 #ifndef WIN32 2531 #ifndef WIN32
2506 mparams.page_size = malloc_getpagesize; 2532 mparams.page_size = malloc_getpagesize;
2507 mparams.granularity = ((DEFAULT_GRANULARITY != 0)? 2533 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2508 DEFAULT_GRANULARITY : mparams.page_size); 2534 DEFAULT_GRANULARITY : mparams.page_size);
2509 #else /* WIN32 */ 2535 #else /* WIN32 */
2510 { 2536 {
2511 SYSTEM_INFO system_info; 2537 SYSTEM_INFO system_info;
2512 GetSystemInfo(&system_info); 2538 GetSystemInfo (&system_info);
2513 mparams.page_size = system_info.dwPageSize; 2539 mparams.page_size = system_info.dwPageSize;
2514 mparams.granularity = system_info.dwAllocationGranularity; 2540 mparams.granularity = system_info.dwAllocationGranularity;
2515 } 2541 }
2516 #endif /* WIN32 */ 2542 #endif /* WIN32 */
2517 2543
2518 /* Sanity-check configuration: 2544 /* Sanity-check configuration:
2519 size_t must be unsigned and as wide as pointer type. 2545 size_t must be unsigned and as wide as pointer type.
2520 ints must be at least 4 bytes. 2546 ints must be at least 4 bytes.
2521 alignment must be at least 8. 2547 alignment must be at least 8.
2522 Alignment, min chunk size, and page size must all be powers of 2. 2548 Alignment, min chunk size, and page size must all be powers of 2.
2523 */ 2549 */
2524 if ((sizeof(size_t) != sizeof(char*)) || 2550 if ((sizeof (size_t) != sizeof (char *)) ||
2525 (MAX_SIZE_T < MIN_CHUNK_SIZE) || 2551 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2526 (sizeof(int) < 4) || 2552 (sizeof (int) < 4) ||
2527 (MALLOC_ALIGNMENT < (size_t)8U) || 2553 (MALLOC_ALIGNMENT < (size_t) 8U) ||
2528 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || 2554 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2529 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || 2555 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2530 ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || 2556 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2531 ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) 2557 || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2532 ABORT; 2558 ABORT;
2533 } 2559 }
2534 return 0; 2560 return 0;
2535 } 2561 }
2536 2562
2537 /* support for mallopt */ 2563 /* support for mallopt */
2538 static int change_mparam(int param_number, int value) { 2564 static int
2539 size_t val = (size_t)value; 2565 change_mparam (int param_number, int value)
2540 init_mparams(); 2566 {
2541 switch(param_number) { 2567 size_t val = (size_t) value;
2542 case M_TRIM_THRESHOLD: 2568 init_mparams ();
2543 mparams.trim_threshold = val; 2569 switch (param_number) {
2544 return 1; 2570 case M_TRIM_THRESHOLD:
2545 case M_GRANULARITY: 2571 mparams.trim_threshold = val;
2546 if (val >= mparams.page_size && ((val & (val-1)) == 0)) { 2572 return 1;
2547 mparams.granularity = val; 2573 case M_GRANULARITY:
2548 return 1; 2574 if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
2549 } 2575 mparams.granularity = val;
2550 else 2576 return 1;
2551 return 0; 2577 } else
2552 case M_MMAP_THRESHOLD: 2578 return 0;
2553 mparams.mmap_threshold = val; 2579 case M_MMAP_THRESHOLD:
2554 return 1; 2580 mparams.mmap_threshold = val;
2555 default: 2581 return 1;
2556 return 0; 2582 default:
2557 } 2583 return 0;
2584 }
2558 } 2585 }
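/* Usage sketch for change_mparam() above (the back end of the public
   mallopt-style interface); the particular values are illustrative
   only. */
static void
example_tune (void)
{
    /* Trim threshold: any value is accepted. */
    change_mparam (M_TRIM_THRESHOLD, 1024 * 1024);
    /* Granularity must be a power of two no smaller than the page
       size, so 128 KB succeeds while e.g. 100000 would return 0. */
    change_mparam (M_GRANULARITY, 128 * 1024);
    /* Requests of 256 KB and larger will then be mmapped directly. */
    change_mparam (M_MMAP_THRESHOLD, 256 * 1024);
}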
2559 2586
2560 #if DEBUG 2587 #if DEBUG
2561 /* ------------------------- Debugging Support --------------------------- */ 2588 /* ------------------------- Debugging Support --------------------------- */
2562 2589
2563 /* Check properties of any chunk, whether free, inuse, mmapped etc */ 2590 /* Check properties of any chunk, whether free, inuse, mmapped etc */
2564 static void do_check_any_chunk(mstate m, mchunkptr p) { 2591 static void
2565 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); 2592 do_check_any_chunk (mstate m, mchunkptr p)
2566 assert(ok_address(m, p)); 2593 {
2594 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2595 assert (ok_address (m, p));
2567 } 2596 }
2568 2597
2569 /* Check properties of top chunk */ 2598 /* Check properties of top chunk */
2570 static void do_check_top_chunk(mstate m, mchunkptr p) { 2599 static void
2571 msegmentptr sp = segment_holding(m, (char*)p); 2600 do_check_top_chunk (mstate m, mchunkptr p)
2572 size_t sz = chunksize(p); 2601 {
2573 assert(sp != 0); 2602 msegmentptr sp = segment_holding (m, (char *) p);
2574 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); 2603 size_t sz = chunksize (p);
2575 assert(ok_address(m, p)); 2604 assert (sp != 0);
2576 assert(sz == m->topsize); 2605 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2577 assert(sz > 0); 2606 assert (ok_address (m, p));
2578 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); 2607 assert (sz == m->topsize);
2579 assert(pinuse(p)); 2608 assert (sz > 0);
2580 assert(!next_pinuse(p)); 2609 assert (sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2610 assert (pinuse (p));
2611 assert (!next_pinuse (p));
2581 } 2612 }
2582 2613
2583 /* Check properties of (inuse) mmapped chunks */ 2614 /* Check properties of (inuse) mmapped chunks */
2584 static void do_check_mmapped_chunk(mstate m, mchunkptr p) { 2615 static void
2585 size_t sz = chunksize(p); 2616 do_check_mmapped_chunk (mstate m, mchunkptr p)
2586 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); 2617 {
2587 assert(is_mmapped(p)); 2618 size_t sz = chunksize (p);
2588 assert(use_mmap(m)); 2619 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2589 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); 2620 assert (is_mmapped (p));
2590 assert(ok_address(m, p)); 2621 assert (use_mmap (m));
2591 assert(!is_small(sz)); 2622 assert ((is_aligned (chunk2mem (p))) || (p->head == FENCEPOST_HEAD));
2592 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); 2623 assert (ok_address (m, p));
2593 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); 2624 assert (!is_small (sz));
2594 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); 2625 assert ((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2626 assert (chunk_plus_offset (p, sz)->head == FENCEPOST_HEAD);
2627 assert (chunk_plus_offset (p, sz + SIZE_T_SIZE)->head == 0);
2595 } 2628 }
2596 2629
2597 /* Check properties of inuse chunks */ 2630 /* Check properties of inuse chunks */
2598 static void do_check_inuse_chunk(mstate m, mchunkptr p) { 2631 static void
2599 do_check_any_chunk(m, p); 2632 do_check_inuse_chunk (mstate m, mchunkptr p)
2600 assert(cinuse(p)); 2633 {
2601 assert(next_pinuse(p)); 2634 do_check_any_chunk (m, p);
2602 /* If not pinuse and not mmapped, previous chunk has OK offset */ 2635 assert (cinuse (p));
2603 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); 2636 assert (next_pinuse (p));
2604 if (is_mmapped(p)) 2637 /* If not pinuse and not mmapped, previous chunk has OK offset */
2605 do_check_mmapped_chunk(m, p); 2638 assert (is_mmapped (p) || pinuse (p) || next_chunk (prev_chunk (p)) == p);
2639 if (is_mmapped (p))
2640 do_check_mmapped_chunk (m, p);
2606 } 2641 }
2607 2642
2608 /* Check properties of free chunks */ 2643 /* Check properties of free chunks */
2609 static void do_check_free_chunk(mstate m, mchunkptr p) { 2644 static void
2610 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); 2645 do_check_free_chunk (mstate m, mchunkptr p)
2611 mchunkptr next = chunk_plus_offset(p, sz); 2646 {
2612 do_check_any_chunk(m, p); 2647 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2613 assert(!cinuse(p)); 2648 mchunkptr next = chunk_plus_offset (p, sz);
2614 assert(!next_pinuse(p)); 2649 do_check_any_chunk (m, p);
2615 assert (!is_mmapped(p)); 2650 assert (!cinuse (p));
2616 if (p != m->dv && p != m->top) { 2651 assert (!next_pinuse (p));
2617 if (sz >= MIN_CHUNK_SIZE) { 2652 assert (!is_mmapped (p));
2618 assert((sz & CHUNK_ALIGN_MASK) == 0); 2653 if (p != m->dv && p != m->top) {
2619 assert(is_aligned(chunk2mem(p))); 2654 if (sz >= MIN_CHUNK_SIZE) {
2620 assert(next->prev_foot == sz); 2655 assert ((sz & CHUNK_ALIGN_MASK) == 0);
2621 assert(pinuse(p)); 2656 assert (is_aligned (chunk2mem (p)));
2622 assert (next == m->top || cinuse(next)); 2657 assert (next->prev_foot == sz);
2623 assert(p->fd->bk == p); 2658 assert (pinuse (p));
2624 assert(p->bk->fd == p); 2659 assert (next == m->top || cinuse (next));
2625 } 2660 assert (p->fd->bk == p);
2626 else /* markers are always of size SIZE_T_SIZE */ 2661 assert (p->bk->fd == p);
2627 assert(sz == SIZE_T_SIZE); 2662 } else /* markers are always of size SIZE_T_SIZE */
2628 } 2663 assert (sz == SIZE_T_SIZE);
2664 }
2629 } 2665 }
2630 2666
2631 /* Check properties of malloced chunks at the point they are malloced */ 2667 /* Check properties of malloced chunks at the point they are malloced */
2632 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { 2668 static void
2633 if (mem != 0) { 2669 do_check_malloced_chunk (mstate m, void *mem, size_t s)
2634 mchunkptr p = mem2chunk(mem); 2670 {
2635 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); 2671 if (mem != 0) {
2636 do_check_inuse_chunk(m, p); 2672 mchunkptr p = mem2chunk (mem);
2637 assert((sz & CHUNK_ALIGN_MASK) == 0); 2673 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2638 assert(sz >= MIN_CHUNK_SIZE); 2674 do_check_inuse_chunk (m, p);
2639 assert(sz >= s); 2675 assert ((sz & CHUNK_ALIGN_MASK) == 0);
2640 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ 2676 assert (sz >= MIN_CHUNK_SIZE);
2641 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); 2677 assert (sz >= s);
2642 } 2678 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2679 assert (is_mmapped (p) || sz < (s + MIN_CHUNK_SIZE));
2680 }
2643 } 2681 }
2644 2682
2645 /* Check a tree and its subtrees. */ 2683 /* Check a tree and its subtrees. */
2646 static void do_check_tree(mstate m, tchunkptr t) { 2684 static void
2647 tchunkptr head = 0; 2685 do_check_tree (mstate m, tchunkptr t)
2648 tchunkptr u = t; 2686 {
2649 bindex_t tindex = t->index; 2687 tchunkptr head = 0;
2650 size_t tsize = chunksize(t); 2688 tchunkptr u = t;
2651 bindex_t idx; 2689 bindex_t tindex = t->index;
2652 compute_tree_index(tsize, idx); 2690 size_t tsize = chunksize (t);
2653 assert(tindex == idx); 2691 bindex_t idx;
2654 assert(tsize >= MIN_LARGE_SIZE); 2692 compute_tree_index (tsize, idx);
2655 assert(tsize >= minsize_for_tree_index(idx)); 2693 assert (tindex == idx);
2656 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); 2694 assert (tsize >= MIN_LARGE_SIZE);
2657 2695 assert (tsize >= minsize_for_tree_index (idx));
2658 do { /* traverse through chain of same-sized nodes */ 2696 assert ((idx == NTREEBINS - 1)
2659 do_check_any_chunk(m, ((mchunkptr)u)); 2697 || (tsize < minsize_for_tree_index ((idx + 1))));
2660 assert(u->index == tindex); 2698
2661 assert(chunksize(u) == tsize); 2699 do { /* traverse through chain of same-sized nodes */
2662 assert(!cinuse(u)); 2700 do_check_any_chunk (m, ((mchunkptr) u));
2663 assert(!next_pinuse(u)); 2701 assert (u->index == tindex);
2664 assert(u->fd->bk == u); 2702 assert (chunksize (u) == tsize);
2665 assert(u->bk->fd == u); 2703 assert (!cinuse (u));
2666 if (u->parent == 0) { 2704 assert (!next_pinuse (u));
2667 assert(u->child[0] == 0); 2705 assert (u->fd->bk == u);
2668 assert(u->child[1] == 0); 2706 assert (u->bk->fd == u);
2669 } 2707 if (u->parent == 0) {
2670 else { 2708 assert (u->child[0] == 0);
2671 assert(head == 0); /* only one node on chain has parent */ 2709 assert (u->child[1] == 0);
2672 head = u; 2710 } else {
2673 assert(u->parent != u); 2711 assert (head == 0); /* only one node on chain has parent */
2674 assert (u->parent->child[0] == u || 2712 head = u;
2675 u->parent->child[1] == u || 2713 assert (u->parent != u);
2676 *((tbinptr*)(u->parent)) == u); 2714 assert (u->parent->child[0] == u ||
2677 if (u->child[0] != 0) { 2715 u->parent->child[1] == u ||
2678 assert(u->child[0]->parent == u); 2716 *((tbinptr *) (u->parent)) == u);
2679 assert(u->child[0] != u); 2717 if (u->child[0] != 0) {
2680 do_check_tree(m, u->child[0]); 2718 assert (u->child[0]->parent == u);
2681 } 2719 assert (u->child[0] != u);
2682 if (u->child[1] != 0) { 2720 do_check_tree (m, u->child[0]);
2683 assert(u->child[1]->parent == u); 2721 }
2684 assert(u->child[1] != u); 2722 if (u->child[1] != 0) {
2685 do_check_tree(m, u->child[1]); 2723 assert (u->child[1]->parent == u);
2686 } 2724 assert (u->child[1] != u);
2687 if (u->child[0] != 0 && u->child[1] != 0) { 2725 do_check_tree (m, u->child[1]);
2688 assert(chunksize(u->child[0]) < chunksize(u->child[1])); 2726 }
2689 } 2727 if (u->child[0] != 0 && u->child[1] != 0) {
2690 } 2728 assert (chunksize (u->child[0]) < chunksize (u->child[1]));
2691 u = u->fd; 2729 }
2692 } while (u != t); 2730 }
2693 assert(head != 0); 2731 u = u->fd;
2732 }
2733 while (u != t);
2734 assert (head != 0);
2694 } 2735 }
2695 2736
2696 /* Check all the chunks in a treebin. */ 2737 /* Check all the chunks in a treebin. */
2697 static void do_check_treebin(mstate m, bindex_t i) { 2738 static void
2698 tbinptr* tb = treebin_at(m, i); 2739 do_check_treebin (mstate m, bindex_t i)
2699 tchunkptr t = *tb; 2740 {
2700 int empty = (m->treemap & (1U << i)) == 0; 2741 tbinptr *tb = treebin_at (m, i);
2701 if (t == 0) 2742 tchunkptr t = *tb;
2702 assert(empty); 2743 int empty = (m->treemap & (1U << i)) == 0;
2703 if (!empty) 2744 if (t == 0)
2704 do_check_tree(m, t); 2745 assert (empty);
2746 if (!empty)
2747 do_check_tree (m, t);
2705 } 2748 }
2706 2749
2707 /* Check all the chunks in a smallbin. */ 2750 /* Check all the chunks in a smallbin. */
2708 static void do_check_smallbin(mstate m, bindex_t i) { 2751 static void
2709 sbinptr b = smallbin_at(m, i); 2752 do_check_smallbin (mstate m, bindex_t i)
2710 mchunkptr p = b->bk; 2753 {
2711 unsigned int empty = (m->smallmap & (1U << i)) == 0; 2754 sbinptr b = smallbin_at (m, i);
2712 if (p == b) 2755 mchunkptr p = b->bk;
2713 assert(empty); 2756 unsigned int empty = (m->smallmap & (1U << i)) == 0;
2714 if (!empty) { 2757 if (p == b)
2715 for (; p != b; p = p->bk) { 2758 assert (empty);
2716 size_t size = chunksize(p); 2759 if (!empty) {
2717 mchunkptr q; 2760 for (; p != b; p = p->bk) {
2718 /* each chunk claims to be free */ 2761 size_t size = chunksize (p);
2719 do_check_free_chunk(m, p); 2762 mchunkptr q;
2720 /* chunk belongs in bin */ 2763 /* each chunk claims to be free */
2721 assert(small_index(size) == i); 2764 do_check_free_chunk (m, p);
2722 assert(p->bk == b || chunksize(p->bk) == chunksize(p)); 2765 /* chunk belongs in bin */
2723 /* chunk is followed by an inuse chunk */ 2766 assert (small_index (size) == i);
2724 q = next_chunk(p); 2767 assert (p->bk == b || chunksize (p->bk) == chunksize (p));
2725 if (q->head != FENCEPOST_HEAD) 2768 /* chunk is followed by an inuse chunk */
2726 do_check_inuse_chunk(m, q); 2769 q = next_chunk (p);
2727 } 2770 if (q->head != FENCEPOST_HEAD)
2728 } 2771 do_check_inuse_chunk (m, q);
2772 }
2773 }
2729 } 2774 }
2730 2775
2731 /* Find x in a bin. Used in other check functions. */ 2776 /* Find x in a bin. Used in other check functions. */
2732 static int bin_find(mstate m, mchunkptr x) { 2777 static int
2733 size_t size = chunksize(x); 2778 bin_find (mstate m, mchunkptr x)
2734 if (is_small(size)) { 2779 {
2735 bindex_t sidx = small_index(size); 2780 size_t size = chunksize (x);
2736 sbinptr b = smallbin_at(m, sidx); 2781 if (is_small (size)) {
2737 if (smallmap_is_marked(m, sidx)) { 2782 bindex_t sidx = small_index (size);
2738 mchunkptr p = b; 2783 sbinptr b = smallbin_at (m, sidx);
2739 do { 2784 if (smallmap_is_marked (m, sidx)) {
2740 if (p == x) 2785 mchunkptr p = b;
2741 return 1; 2786 do {
2742 } while ((p = p->fd) != b); 2787 if (p == x)
2743 } 2788 return 1;
2744 } 2789 }
2745 else { 2790 while ((p = p->fd) != b);
2746 bindex_t tidx; 2791 }
2747 compute_tree_index(size, tidx); 2792 } else {
2748 if (treemap_is_marked(m, tidx)) { 2793 bindex_t tidx;
2749 tchunkptr t = *treebin_at(m, tidx); 2794 compute_tree_index (size, tidx);
2750 size_t sizebits = size << leftshift_for_tree_index(tidx); 2795 if (treemap_is_marked (m, tidx)) {
2751 while (t != 0 && chunksize(t) != size) { 2796 tchunkptr t = *treebin_at (m, tidx);
2752 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; 2797 size_t sizebits = size << leftshift_for_tree_index (tidx);
2753 sizebits <<= 1; 2798 while (t != 0 && chunksize (t) != size) {
2754 } 2799 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2755 if (t != 0) { 2800 sizebits <<= 1;
2756 tchunkptr u = t; 2801 }
2757 do { 2802 if (t != 0) {
2758 if (u == (tchunkptr)x) 2803 tchunkptr u = t;
2759 return 1; 2804 do {
2760 } while ((u = u->fd) != t); 2805 if (u == (tchunkptr) x)
2761 } 2806 return 1;
2762 } 2807 }
2763 } 2808 while ((u = u->fd) != t);
2764 return 0; 2809 }
2810 }
2811 }
2812 return 0;
2765 } 2813 }
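/* Illustrative sketch (generic code, hypothetical names) of the
   bit-trie descent that the tree-bin branch of bin_find() performs:
   after shifting away the bits already implied by the bin index, each
   step branches on the current most-significant bit of the size. */
#include <stddef.h>
#include <limits.h>

typedef struct tnode { size_t key; struct tnode *child[2]; } tnode;

static tnode *
trie_find (tnode * t, size_t size, unsigned prefix_bits)
{
    size_t sizebits = size << prefix_bits;    /* like leftshift_for_tree_index */
    while (t != NULL && t->key != size) {
        unsigned dir =
            (unsigned) (sizebits >> (sizeof (size_t) * CHAR_BIT - 1)) & 1U;
        t = t->child[dir];                    /* follow the 0- or 1-branch */
        sizebits <<= 1;                       /* next bit becomes the decision bit */
    }
    return t;                                 /* NULL if no chunk of exactly this size */
}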
2766 2814
2767 /* Traverse each chunk and check it; return total */ 2815 /* Traverse each chunk and check it; return total */
2768 static size_t traverse_and_check(mstate m) { 2816 static size_t
2769 size_t sum = 0; 2817 traverse_and_check (mstate m)
2770 if (is_initialized(m)) { 2818 {
2771 msegmentptr s = &m->seg; 2819 size_t sum = 0;
2772 sum += m->topsize + TOP_FOOT_SIZE; 2820 if (is_initialized (m)) {
2773 while (s != 0) { 2821 msegmentptr s = &m->seg;
2774 mchunkptr q = align_as_chunk(s->base); 2822 sum += m->topsize + TOP_FOOT_SIZE;
2775 mchunkptr lastq = 0; 2823 while (s != 0) {
2776 assert(pinuse(q)); 2824 mchunkptr q = align_as_chunk (s->base);
2777 while (segment_holds(s, q) && 2825 mchunkptr lastq = 0;
2778 q != m->top && q->head != FENCEPOST_HEAD) { 2826 assert (pinuse (q));
2779 sum += chunksize(q); 2827 while (segment_holds (s, q) &&
2780 if (cinuse(q)) { 2828 q != m->top && q->head != FENCEPOST_HEAD) {
2781 assert(!bin_find(m, q)); 2829 sum += chunksize (q);
2782 do_check_inuse_chunk(m, q); 2830 if (cinuse (q)) {
2831 assert (!bin_find (m, q));
2832 do_check_inuse_chunk (m, q);
2833 } else {
2834 assert (q == m->dv || bin_find (m, q));
2835 assert (lastq == 0 || cinuse (lastq)); /* Not 2 consecutive free */
2836 do_check_free_chunk (m, q);
2837 }
2838 lastq = q;
2839 q = next_chunk (q);
2840 }
2841 s = s->next;
2783 } 2842 }
2784 else { 2843 }
2785 assert(q == m->dv || bin_find(m, q)); 2844 return sum;
2786 assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ 2845 }
2787 do_check_free_chunk(m, q); 2846
2847 /* Check all properties of malloc_state. */
2848 static void
2849 do_check_malloc_state (mstate m)
2850 {
2851 bindex_t i;
2852 size_t total;
2853 /* check bins */
2854 for (i = 0; i < NSMALLBINS; ++i)
2855 do_check_smallbin (m, i);
2856 for (i = 0; i < NTREEBINS; ++i)
2857 do_check_treebin (m, i);
2858
2859 if (m->dvsize != 0) { /* check dv chunk */
2860 do_check_any_chunk (m, m->dv);
2861 assert (m->dvsize == chunksize (m->dv));
2862 assert (m->dvsize >= MIN_CHUNK_SIZE);
2863 assert (bin_find (m, m->dv) == 0);
2864 }
2865
2866 if (m->top != 0) { /* check top chunk */
2867 do_check_top_chunk (m, m->top);
2868 assert (m->topsize == chunksize (m->top));
2869 assert (m->topsize > 0);
2870 assert (bin_find (m, m->top) == 0);
2871 }
2872
2873 total = traverse_and_check (m);
2874 assert (total <= m->footprint);
2875 assert (m->footprint <= m->max_footprint);
2876 }
2877 #endif /* DEBUG */
2878
2879 /* ----------------------------- statistics ------------------------------ */
2880
2881 #if !NO_MALLINFO
2882 static struct mallinfo
2883 internal_mallinfo (mstate m)
2884 {
2885 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2886 if (!PREACTION (m)) {
2887 check_malloc_state (m);
2888 if (is_initialized (m)) {
2889 size_t nfree = SIZE_T_ONE; /* top always free */
2890 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2891 size_t sum = mfree;
2892 msegmentptr s = &m->seg;
2893 while (s != 0) {
2894 mchunkptr q = align_as_chunk (s->base);
2895 while (segment_holds (s, q) &&
2896 q != m->top && q->head != FENCEPOST_HEAD) {
2897 size_t sz = chunksize (q);
2898 sum += sz;
2899 if (!cinuse (q)) {
2900 mfree += sz;
2901 ++nfree;
2902 }
2903 q = next_chunk (q);
2904 }
2905 s = s->next;
2906 }
2907
2908 nm.arena = sum;
2909 nm.ordblks = nfree;
2910 nm.hblkhd = m->footprint - sum;
2911 nm.usmblks = m->max_footprint;
2912 nm.uordblks = m->footprint - mfree;
2913 nm.fordblks = mfree;
2914 nm.keepcost = m->topsize;
2788 } 2915 }
2789 lastq = q; 2916
2790 q = next_chunk(q); 2917 POSTACTION (m);
2791 } 2918 }
2792 s = s->next; 2919 return nm;
2793 } 2920 }
2794 } 2921 #endif /* !NO_MALLINFO */
2795 return sum; 2922
2796 } 2923 static void
2797 2924 internal_malloc_stats (mstate m)
2798 /* Check all properties of malloc_state. */ 2925 {
2799 static void do_check_malloc_state(mstate m) { 2926 if (!PREACTION (m)) {
2800 bindex_t i; 2927 size_t maxfp = 0;
2801 size_t total; 2928 size_t fp = 0;
2802 /* check bins */ 2929 size_t used = 0;
2803 for (i = 0; i < NSMALLBINS; ++i) 2930 check_malloc_state (m);
2804 do_check_smallbin(m, i); 2931 if (is_initialized (m)) {
2805 for (i = 0; i < NTREEBINS; ++i) 2932 msegmentptr s = &m->seg;
2806 do_check_treebin(m, i); 2933 maxfp = m->max_footprint;
2807 2934 fp = m->footprint;
2808 if (m->dvsize != 0) { /* check dv chunk */ 2935 used = fp - (m->topsize + TOP_FOOT_SIZE);
2809 do_check_any_chunk(m, m->dv); 2936
2810 assert(m->dvsize == chunksize(m->dv)); 2937 while (s != 0) {
2811 assert(m->dvsize >= MIN_CHUNK_SIZE); 2938 mchunkptr q = align_as_chunk (s->base);
2812 assert(bin_find(m, m->dv) == 0); 2939 while (segment_holds (s, q) &&
2813 } 2940 q != m->top && q->head != FENCEPOST_HEAD) {
2814 2941 if (!cinuse (q))
2815 if (m->top != 0) { /* check top chunk */ 2942 used -= chunksize (q);
2816 do_check_top_chunk(m, m->top); 2943 q = next_chunk (q);
2817 assert(m->topsize == chunksize(m->top)); 2944 }
2818 assert(m->topsize > 0); 2945 s = s->next;
2819 assert(bin_find(m, m->top) == 0); 2946 }
2820 }
2821
2822 total = traverse_and_check(m);
2823 assert(total <= m->footprint);
2824 assert(m->footprint <= m->max_footprint);
2825 }
2826 #endif /* DEBUG */
2827
2828 /* ----------------------------- statistics ------------------------------ */
2829
2830 #if !NO_MALLINFO
2831 static struct mallinfo internal_mallinfo(mstate m) {
2832 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2833 if (!PREACTION(m)) {
2834 check_malloc_state(m);
2835 if (is_initialized(m)) {
2836 size_t nfree = SIZE_T_ONE; /* top always free */
2837 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2838 size_t sum = mfree;
2839 msegmentptr s = &m->seg;
2840 while (s != 0) {
2841 mchunkptr q = align_as_chunk(s->base);
2842 while (segment_holds(s, q) &&
2843 q != m->top && q->head != FENCEPOST_HEAD) {
2844 size_t sz = chunksize(q);
2845 sum += sz;
2846 if (!cinuse(q)) {
2847 mfree += sz;
2848 ++nfree;
2849 }
2850 q = next_chunk(q);
2851 } 2947 }
2852 s = s->next;
2853 }
2854
2855 nm.arena = sum;
2856 nm.ordblks = nfree;
2857 nm.hblkhd = m->footprint - sum;
2858 nm.usmblks = m->max_footprint;
2859 nm.uordblks = m->footprint - mfree;
2860 nm.fordblks = mfree;
2861 nm.keepcost = m->topsize;
2862 }
2863
2864 POSTACTION(m);
2865 }
2866 return nm;
2867 }
2868 #endif /* !NO_MALLINFO */
2869
2870 static void internal_malloc_stats(mstate m) {
2871 if (!PREACTION(m)) {
2872 size_t maxfp = 0;
2873 size_t fp = 0;
2874 size_t used = 0;
2875 check_malloc_state(m);
2876 if (is_initialized(m)) {
2877 msegmentptr s = &m->seg;
2878 maxfp = m->max_footprint;
2879 fp = m->footprint;
2880 used = fp - (m->topsize + TOP_FOOT_SIZE);
2881
2882 while (s != 0) {
2883 mchunkptr q = align_as_chunk(s->base);
2884 while (segment_holds(s, q) &&
2885 q != m->top && q->head != FENCEPOST_HEAD) {
2886 if (!cinuse(q))
2887 used -= chunksize(q);
2888 q = next_chunk(q);
2889 }
2890 s = s->next;
2891 }
2892 }
2893
2894 #ifndef LACKS_STDIO_H 2948 #ifndef LACKS_STDIO_H
2895 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); 2949 fprintf (stderr, "max system bytes = %10lu\n",
2896 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); 2950 (unsigned long) (maxfp));
2897 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); 2951 fprintf (stderr, "system bytes = %10lu\n", (unsigned long) (fp));
2952 fprintf (stderr, "in use bytes = %10lu\n",
2953 (unsigned long) (used));
2898 #endif 2954 #endif
2899 2955
2900 POSTACTION(m); 2956 POSTACTION (m);
2901 } 2957 }
2902 } 2958 }
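/* Sketch (not part of this file) of reading the figures produced by
   internal_mallinfo() for the global state gm; field meanings follow
   the assignments above, the fprintf calls are illustrative only and
   assume stdio is available (see LACKS_STDIO_H). */
static void
example_report (void)
{
    struct mallinfo mi = internal_mallinfo (gm);
    fprintf (stderr, "segment bytes tracked (arena) = %lu\n", (unsigned long) mi.arena);
    fprintf (stderr, "free chunks (ordblks)         = %lu\n", (unsigned long) mi.ordblks);
    fprintf (stderr, "bytes in use (uordblks)       = %lu\n", (unsigned long) mi.uordblks);
    fprintf (stderr, "bytes free (fordblks)         = %lu\n", (unsigned long) mi.fordblks);
    fprintf (stderr, "top chunk size (keepcost)     = %lu\n", (unsigned long) mi.keepcost);
}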
2903 2959
2904 /* ----------------------- Operations on smallbins ----------------------- */ 2960 /* ----------------------- Operations on smallbins ----------------------- */
2905 2961
2906 /* 2962 /*
3160 allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain 3216 allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3161 the PINUSE bit so frees can be checked. 3217 the PINUSE bit so frees can be checked.
3162 */ 3218 */
3163 3219
3164 /* Malloc using mmap */ 3220 /* Malloc using mmap */
3165 static void* mmap_alloc(mstate m, size_t nb) { 3221 static void *
3166 size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); 3222 mmap_alloc (mstate m, size_t nb)
3167 if (mmsize > nb) { /* Check for wrap around 0 */ 3223 {
3168 char* mm = (char*)(DIRECT_MMAP(mmsize)); 3224 size_t mmsize =
3169 if (mm != CMFAIL) { 3225 granularity_align (nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3170 size_t offset = align_offset(chunk2mem(mm)); 3226 if (mmsize > nb) { /* Check for wrap around 0 */
3171 size_t psize = mmsize - offset - MMAP_FOOT_PAD; 3227 char *mm = (char *) (DIRECT_MMAP (mmsize));
3172 mchunkptr p = (mchunkptr)(mm + offset); 3228 if (mm != CMFAIL) {
3173 p->prev_foot = offset | IS_MMAPPED_BIT; 3229 size_t offset = align_offset (chunk2mem (mm));
3174 (p)->head = (psize|CINUSE_BIT); 3230 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3175 mark_inuse_foot(m, p, psize); 3231 mchunkptr p = (mchunkptr) (mm + offset);
3176 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; 3232 p->prev_foot = offset | IS_MMAPPED_BIT;
3177 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; 3233 (p)->head = (psize | CINUSE_BIT);
3178 3234 mark_inuse_foot (m, p, psize);
3179 if (mm < m->least_addr) 3235 chunk_plus_offset (p, psize)->head = FENCEPOST_HEAD;
3180 m->least_addr = mm; 3236 chunk_plus_offset (p, psize + SIZE_T_SIZE)->head = 0;
3181 if ((m->footprint += mmsize) > m->max_footprint) 3237
3182 m->max_footprint = m->footprint; 3238 if (mm < m->least_addr)
3183 assert(is_aligned(chunk2mem(p))); 3239 m->least_addr = mm;
3184 check_mmapped_chunk(m, p); 3240 if ((m->footprint += mmsize) > m->max_footprint)
3185 return chunk2mem(p); 3241 m->max_footprint = m->footprint;
3186 } 3242 assert (is_aligned (chunk2mem (p)));
3187 } 3243 check_mmapped_chunk (m, p);
3188 return 0; 3244 return chunk2mem (p);
3245 }
3246 }
3247 return 0;
3189 } 3248 }
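/* Worked illustration (standalone, not allocator code) of the size
   mmap_alloc() actually requests, assuming a 64-bit build where
   SIX_SIZE_T_SIZES == 48 and CHUNK_ALIGN_MASK == 7, and an assumed
   allocation granularity of 64 KB; granularity_align() rounds up to a
   multiple of that granularity. */
#include <stdio.h>
int
main (void)
{
    unsigned long nb = 300000UL;                      /* padded request */
    unsigned long gran = 64UL * 1024;                 /* assumed granularity */
    unsigned long raw = nb + 48 + 7;                  /* nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK */
    unsigned long mmsize = (raw + gran - 1) & ~(gran - 1);
    printf ("nb = %lu -> mmap request = %lu bytes\n", nb, mmsize);  /* 327680 */
    return 0;
}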
3190 3249
3191 /* Realloc using mmap */ 3250 /* Realloc using mmap */
3192 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { 3251 static mchunkptr
3193 size_t oldsize = chunksize(oldp); 3252 mmap_resize (mstate m, mchunkptr oldp, size_t nb)
3194 if (is_small(nb)) /* Can't shrink mmap regions below small size */ 3253 {
3254 size_t oldsize = chunksize (oldp);
3255 if (is_small (nb)) /* Can't shrink mmap regions below small size */
3256 return 0;
3257 /* Keep old chunk if big enough but not too big */
3258 if (oldsize >= nb + SIZE_T_SIZE &&
3259 (oldsize - nb) <= (mparams.granularity << 1))
3260 return oldp;
3261 else {
3262 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3263 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3264 size_t newmmsize = granularity_align (nb + SIX_SIZE_T_SIZES +
3265 CHUNK_ALIGN_MASK);
3266 char *cp = (char *) CALL_MREMAP ((char *) oldp - offset,
3267 oldmmsize, newmmsize, 1);
3268 if (cp != CMFAIL) {
3269 mchunkptr newp = (mchunkptr) (cp + offset);
3270 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3271 newp->head = (psize | CINUSE_BIT);
3272 mark_inuse_foot (m, newp, psize);
3273 chunk_plus_offset (newp, psize)->head = FENCEPOST_HEAD;
3274 chunk_plus_offset (newp, psize + SIZE_T_SIZE)->head = 0;
3275
3276 if (cp < m->least_addr)
3277 m->least_addr = cp;
3278 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3279 m->max_footprint = m->footprint;
3280 check_mmapped_chunk (m, newp);
3281 return newp;
3282 }
3283 }
3195 return 0; 3284 return 0;
3196 /* Keep old chunk if big enough but not too big */
3197 if (oldsize >= nb + SIZE_T_SIZE &&
3198 (oldsize - nb) <= (mparams.granularity << 1))
3199 return oldp;
3200 else {
3201 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3202 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3203 size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3204 CHUNK_ALIGN_MASK);
3205 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3206 oldmmsize, newmmsize, 1);
3207 if (cp != CMFAIL) {
3208 mchunkptr newp = (mchunkptr)(cp + offset);
3209 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3210 newp->head = (psize|CINUSE_BIT);
3211 mark_inuse_foot(m, newp, psize);
3212 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3213 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
3214
3215 if (cp < m->least_addr)
3216 m->least_addr = cp;
3217 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3218 m->max_footprint = m->footprint;
3219 check_mmapped_chunk(m, newp);
3220 return newp;
3221 }
3222 }
3223 return 0;
3224 } 3285 }
3225 3286
3226 /* -------------------------- mspace management -------------------------- */ 3287 /* -------------------------- mspace management -------------------------- */
3227 3288
3228 /* Initialize top chunk and its size */ 3289 /* Initialize top chunk and its size */
3229 static void init_top(mstate m, mchunkptr p, size_t psize) { 3290 static void
3230 /* Ensure alignment */ 3291 init_top (mstate m, mchunkptr p, size_t psize)
3231 size_t offset = align_offset(chunk2mem(p)); 3292 {
3232 p = (mchunkptr)((char*)p + offset); 3293 /* Ensure alignment */
3233 psize -= offset; 3294 size_t offset = align_offset (chunk2mem (p));
3234 3295 p = (mchunkptr) ((char *) p + offset);
3235 m->top = p; 3296 psize -= offset;
3236 m->topsize = psize; 3297
3237 p->head = psize | PINUSE_BIT; 3298 m->top = p;
3238 /* set size of fake trailing chunk holding overhead space only once */ 3299 m->topsize = psize;
3239 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; 3300 p->head = psize | PINUSE_BIT;
3240 m->trim_check = mparams.trim_threshold; /* reset on each update */ 3301 /* set size of fake trailing chunk holding overhead space only once */
3302 chunk_plus_offset (p, psize)->head = TOP_FOOT_SIZE;
3303 m->trim_check = mparams.trim_threshold; /* reset on each update */
3241 } 3304 }
3242 3305
3243 /* Initialize bins for a new mstate that is otherwise zeroed out */ 3306 /* Initialize bins for a new mstate that is otherwise zeroed out */
3244 static void init_bins(mstate m) { 3307 static void
3245 /* Establish circular links for smallbins */ 3308 init_bins (mstate m)
3246 bindex_t i; 3309 {
3247 for (i = 0; i < NSMALLBINS; ++i) { 3310 /* Establish circular links for smallbins */
3248 sbinptr bin = smallbin_at(m,i); 3311 bindex_t i;
3249 bin->fd = bin->bk = bin; 3312 for (i = 0; i < NSMALLBINS; ++i) {
3250 } 3313 sbinptr bin = smallbin_at (m, i);
3314 bin->fd = bin->bk = bin;
3315 }
3251 } 3316 }
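/* Generic sketch (illustrative names only) of the circular
   doubly-linked list discipline init_bins() establishes: an empty bin
   head has fd == bk == head, and chunks are spliced in and out in
   constant time. */
typedef struct lnode { struct lnode *fd, *bk; } lnode;

static void
list_init (lnode * head)
{
    head->fd = head->bk = head;          /* empty list points at itself */
}

static void
list_push_front (lnode * head, lnode * n)        /* insert n just after head */
{
    n->fd = head->fd;
    n->bk = head;
    head->fd->bk = n;
    head->fd = n;
}

static void
list_unlink (lnode * n)                  /* remove n from its list */
{
    n->bk->fd = n->fd;
    n->fd->bk = n->bk;
}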
3252 3317
3253 #if PROCEED_ON_ERROR 3318 #if PROCEED_ON_ERROR
3254 3319
3255 /* default corruption action */ 3320 /* default corruption action */
3256 static void reset_on_error(mstate m) { 3321 static void
3257 int i; 3322 reset_on_error (mstate m)
3258 ++malloc_corruption_error_count; 3323 {
3259 /* Reinitialize fields to forget about all memory */ 3324 int i;
3260 m->smallbins = m->treebins = 0; 3325 ++malloc_corruption_error_count;
3261 m->dvsize = m->topsize = 0; 3326 /* Reinitialize fields to forget about all memory */
3262 m->seg.base = 0; 3327 m->smallbins = m->treebins = 0;
3263 m->seg.size = 0; 3328 m->dvsize = m->topsize = 0;
3264 m->seg.next = 0; 3329 m->seg.base = 0;
3265 m->top = m->dv = 0; 3330 m->seg.size = 0;
3266 for (i = 0; i < NTREEBINS; ++i) 3331 m->seg.next = 0;
3267 *treebin_at(m, i) = 0; 3332 m->top = m->dv = 0;
3268 init_bins(m); 3333 for (i = 0; i < NTREEBINS; ++i)
3334 *treebin_at (m, i) = 0;
3335 init_bins (m);
3269 } 3336 }
3270 #endif /* PROCEED_ON_ERROR */ 3337 #endif /* PROCEED_ON_ERROR */
3271 3338
3272 /* Allocate chunk and prepend remainder with chunk in successor base. */ 3339 /* Allocate chunk and prepend remainder with chunk in successor base. */
3273 static void* prepend_alloc(mstate m, char* newbase, char* oldbase, 3340 static void *
3274 size_t nb) { 3341 prepend_alloc (mstate m, char *newbase, char *oldbase, size_t nb)
3275 mchunkptr p = align_as_chunk(newbase); 3342 {
3276 mchunkptr oldfirst = align_as_chunk(oldbase); 3343 mchunkptr p = align_as_chunk (newbase);
3277 size_t psize = (char*)oldfirst - (char*)p; 3344 mchunkptr oldfirst = align_as_chunk (oldbase);
3278 mchunkptr q = chunk_plus_offset(p, nb); 3345 size_t psize = (char *) oldfirst - (char *) p;
3279 size_t qsize = psize - nb; 3346 mchunkptr q = chunk_plus_offset (p, nb);
3280 set_size_and_pinuse_of_inuse_chunk(m, p, nb); 3347 size_t qsize = psize - nb;
3281 3348 set_size_and_pinuse_of_inuse_chunk (m, p, nb);
3282 assert((char*)oldfirst > (char*)q); 3349
3283 assert(pinuse(oldfirst)); 3350 assert ((char *) oldfirst > (char *) q);
3284 assert(qsize >= MIN_CHUNK_SIZE); 3351 assert (pinuse (oldfirst));
3285 3352 assert (qsize >= MIN_CHUNK_SIZE);
3286 /* consolidate remainder with first chunk of old base */ 3353
3287 if (oldfirst == m->top) { 3354 /* consolidate remainder with first chunk of old base */
3288 size_t tsize = m->topsize += qsize; 3355 if (oldfirst == m->top) {
3289 m->top = q; 3356 size_t tsize = m->topsize += qsize;
3290 q->head = tsize | PINUSE_BIT; 3357 m->top = q;
3291 check_top_chunk(m, q); 3358 q->head = tsize | PINUSE_BIT;
3292 } 3359 check_top_chunk (m, q);
3293 else if (oldfirst == m->dv) { 3360 } else if (oldfirst == m->dv) {
3294 size_t dsize = m->dvsize += qsize; 3361 size_t dsize = m->dvsize += qsize;
3295 m->dv = q; 3362 m->dv = q;
3296 set_size_and_pinuse_of_free_chunk(q, dsize); 3363 set_size_and_pinuse_of_free_chunk (q, dsize);
3297 } 3364 } else {
3298 else { 3365 if (!cinuse (oldfirst)) {
3299 if (!cinuse(oldfirst)) { 3366 size_t nsize = chunksize (oldfirst);
3300 size_t nsize = chunksize(oldfirst); 3367 unlink_chunk (m, oldfirst, nsize);
3301 unlink_chunk(m, oldfirst, nsize); 3368 oldfirst = chunk_plus_offset (oldfirst, nsize);
3302 oldfirst = chunk_plus_offset(oldfirst, nsize); 3369 qsize += nsize;
3303 qsize += nsize; 3370 }
3304 } 3371 set_free_with_pinuse (q, qsize, oldfirst);
3305 set_free_with_pinuse(q, qsize, oldfirst); 3372 insert_chunk (m, q, qsize);
3306 insert_chunk(m, q, qsize); 3373 check_free_chunk (m, q);
3307 check_free_chunk(m, q); 3374 }
3308 } 3375
3309 3376 check_malloced_chunk (m, chunk2mem (p), nb);
3310 check_malloced_chunk(m, chunk2mem(p), nb); 3377 return chunk2mem (p);
3311 return chunk2mem(p);
3312 } 3378 }
3313 3379
3314 3380
3315 /* Add a segment to hold a new noncontiguous region */ 3381 /* Add a segment to hold a new noncontiguous region */
3316 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { 3382 static void
3317 /* Determine locations and sizes of segment, fenceposts, old top */ 3383 add_segment (mstate m, char *tbase, size_t tsize, flag_t mmapped)
3318 char* old_top = (char*)m->top; 3384 {
3319 msegmentptr oldsp = segment_holding(m, old_top); 3385 /* Determine locations and sizes of segment, fenceposts, old top */
3320 char* old_end = oldsp->base + oldsp->size; 3386 char *old_top = (char *) m->top;
3321 size_t ssize = pad_request(sizeof(struct malloc_segment)); 3387 msegmentptr oldsp = segment_holding (m, old_top);
3322 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); 3388 char *old_end = oldsp->base + oldsp->size;
3323 size_t offset = align_offset(chunk2mem(rawsp)); 3389 size_t ssize = pad_request (sizeof (struct malloc_segment));
3324 char* asp = rawsp + offset; 3390 char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3325 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; 3391 size_t offset = align_offset (chunk2mem (rawsp));
3326 mchunkptr sp = (mchunkptr)csp; 3392 char *asp = rawsp + offset;
3327 msegmentptr ss = (msegmentptr)(chunk2mem(sp)); 3393 char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3328 mchunkptr tnext = chunk_plus_offset(sp, ssize); 3394 mchunkptr sp = (mchunkptr) csp;
3329 mchunkptr p = tnext; 3395 msegmentptr ss = (msegmentptr) (chunk2mem (sp));
3330 int nfences = 0; 3396 mchunkptr tnext = chunk_plus_offset (sp, ssize);
3331 3397 mchunkptr p = tnext;
3332 /* reset top to new space */ 3398 int nfences = 0;
3333 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); 3399
3334 3400 /* reset top to new space */
3335 /* Set up segment record */ 3401 init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3336 assert(is_aligned(ss)); 3402
3337 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); 3403 /* Set up segment record */
3338 *ss = m->seg; /* Push current record */ 3404 assert (is_aligned (ss));
3339 m->seg.base = tbase; 3405 set_size_and_pinuse_of_inuse_chunk (m, sp, ssize);
3340 m->seg.size = tsize; 3406 *ss = m->seg; /* Push current record */
3341 m->seg.sflags = mmapped; 3407 m->seg.base = tbase;
3342 m->seg.next = ss; 3408 m->seg.size = tsize;
3343 3409 m->seg.sflags = mmapped;
3344 /* Insert trailing fenceposts */ 3410 m->seg.next = ss;
3345 for (;;) { 3411
3346 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); 3412 /* Insert trailing fenceposts */
3347 p->head = FENCEPOST_HEAD; 3413 for (;;) {
3348 ++nfences; 3414 mchunkptr nextp = chunk_plus_offset (p, SIZE_T_SIZE);
3349 if ((char*)(&(nextp->head)) < old_end) 3415 p->head = FENCEPOST_HEAD;
3350 p = nextp; 3416 ++nfences;
3351 else 3417 if ((char *) (&(nextp->head)) < old_end)
3352 break; 3418 p = nextp;
3353 } 3419 else
3354 assert(nfences >= 2); 3420 break;
3355 3421 }
3356 /* Insert the rest of old top into a bin as an ordinary free chunk */ 3422 assert (nfences >= 2);
3357 if (csp != old_top) { 3423
3358 mchunkptr q = (mchunkptr)old_top; 3424 /* Insert the rest of old top into a bin as an ordinary free chunk */
3359 size_t psize = csp - old_top; 3425 if (csp != old_top) {
3360 mchunkptr tn = chunk_plus_offset(q, psize); 3426 mchunkptr q = (mchunkptr) old_top;
3361 set_free_with_pinuse(q, psize, tn); 3427 size_t psize = csp - old_top;
3362 insert_chunk(m, q, psize); 3428 mchunkptr tn = chunk_plus_offset (q, psize);
3363 } 3429 set_free_with_pinuse (q, psize, tn);
3364 3430 insert_chunk (m, q, psize);
3365 check_top_chunk(m, m->top); 3431 }
3432
3433 check_top_chunk (m, m->top);
3366 } 3434 }
3367 3435
3368 /* -------------------------- System allocation -------------------------- */ 3436 /* -------------------------- System allocation -------------------------- */
3369 3437
3370 /* Get memory from system using MORECORE or MMAP */ 3438 /* Get memory from system using MORECORE or MMAP */
3371 static void* sys_alloc(mstate m, size_t nb) { 3439 static void *
3372 char* tbase = CMFAIL; 3440 sys_alloc (mstate m, size_t nb)
3373 size_t tsize = 0; 3441 {
3374 flag_t mmap_flag = 0; 3442 char *tbase = CMFAIL;
3375 3443 size_t tsize = 0;
3376 init_mparams(); 3444 flag_t mmap_flag = 0;
3377 3445
3378 /* Directly map large chunks */ 3446 init_mparams ();
3379 if (use_mmap(m) && nb >= mparams.mmap_threshold) { 3447
3380 void* mem = mmap_alloc(m, nb); 3448 /* Directly map large chunks */
3381 if (mem != 0) 3449 if (use_mmap (m) && nb >= mparams.mmap_threshold) {
3382 return mem; 3450 void *mem = mmap_alloc (m, nb);
3383 } 3451 if (mem != 0)
3384 3452 return mem;
3385 /* 3453 }
3386 Try getting memory in any of three ways (in most-preferred to 3454
3387 least-preferred order): 3455 /*
3388 1. A call to MORECORE that can normally contiguously extend memory. 3456 Try getting memory in any of three ways (in most-preferred to
3457 least-preferred order):
3458 1. A call to MORECORE that can normally contiguously extend memory.
3389 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE 3459 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
3390 or main space is mmapped or a previous contiguous call failed) 3460 or main space is mmapped or a previous contiguous call failed)
3391 2. A call to MMAP new space (disabled if not HAVE_MMAP). 3461 2. A call to MMAP new space (disabled if not HAVE_MMAP).
3392 Note that under the default settings, if MORECORE is unable to 3462 Note that under the default settings, if MORECORE is unable to
3393 fulfill a request, and HAVE_MMAP is true, then mmap is 3463 fulfill a request, and HAVE_MMAP is true, then mmap is
3394 used as a noncontiguous system allocator. This is a useful backup 3464 used as a noncontiguous system allocator. This is a useful backup
3395 strategy for systems with holes in address spaces -- in this case 3465 strategy for systems with holes in address spaces -- in this case
3396 sbrk cannot contiguously expand the heap, but mmap may be able to 3466 sbrk cannot contiguously expand the heap, but mmap may be able to
3397 find space. 3467 find space.
3398 3. A call to MORECORE that cannot usually contiguously extend memory. 3468 3. A call to MORECORE that cannot usually contiguously extend memory.
3399 (disabled if not HAVE_MORECORE) 3469 (disabled if not HAVE_MORECORE)
3400 */ 3470 */
3401 3471
3402 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { 3472 if (MORECORE_CONTIGUOUS && !use_noncontiguous (m)) {
3403 char* br = CMFAIL; 3473 char *br = CMFAIL;
3404 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); 3474 msegmentptr ss =
3405 size_t asize = 0; 3475 (m->top == 0) ? 0 : segment_holding (m, (char *) m->top);
3406 ACQUIRE_MORECORE_LOCK(); 3476 size_t asize = 0;
3407 3477 ACQUIRE_MORECORE_LOCK ();
3408 if (ss == 0) { /* First time through or recovery */ 3478
3409 char* base = (char*)CALL_MORECORE(0); 3479 if (ss == 0) { /* First time through or recovery */
3410 if (base != CMFAIL) { 3480 char *base = (char *) CALL_MORECORE (0);
3411 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); 3481 if (base != CMFAIL) {
3412 /* Adjust to end on a page boundary */ 3482 asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3413 if (!is_page_aligned(base)) 3483 /* Adjust to end on a page boundary */
3414 asize += (page_align((size_t)base) - (size_t)base); 3484 if (!is_page_aligned (base))
3415 /* Can't call MORECORE if size is negative when treated as signed */ 3485 asize += (page_align ((size_t) base) - (size_t) base);
3416 if (asize < HALF_MAX_SIZE_T && 3486 /* Can't call MORECORE if size is negative when treated as signed */
3417 (br = (char*)(CALL_MORECORE(asize))) == base) { 3487 if (asize < HALF_MAX_SIZE_T &&
3418 tbase = base; 3488 (br = (char *) (CALL_MORECORE (asize))) == base) {
3419 tsize = asize; 3489 tbase = base;
3490 tsize = asize;
3491 }
3492 }
3493 } else {
3494 /* Subtract out existing available top space from MORECORE request. */
3495 asize =
3496 granularity_align (nb - m->topsize + TOP_FOOT_SIZE +
3497 SIZE_T_ONE);
3498 /* Use mem here only if it did contiguously extend old space */
3499 if (asize < HALF_MAX_SIZE_T &&
3500 (br =
3501 (char *) (CALL_MORECORE (asize))) == ss->base + ss->size) {
3502 tbase = br;
3503 tsize = asize;
3504 }
3420 } 3505 }
3421 } 3506
3422 } 3507 if (tbase == CMFAIL) { /* Cope with partial failure */
3423 else { 3508 if (br != CMFAIL) { /* Try to use/extend the space we did get */
3424 /* Subtract out existing available top space from MORECORE request. */ 3509 if (asize < HALF_MAX_SIZE_T &&
3425 asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); 3510 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3426 /* Use mem here only if it did contiguously extend old space */ 3511 size_t esize =
3427 if (asize < HALF_MAX_SIZE_T && 3512 granularity_align (nb + TOP_FOOT_SIZE +
3428 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { 3513 SIZE_T_ONE - asize);
3429 tbase = br; 3514 if (esize < HALF_MAX_SIZE_T) {
3430 tsize = asize; 3515 char *end = (char *) CALL_MORECORE (esize);
3431 } 3516 if (end != CMFAIL)
3432 } 3517 asize += esize;
3433 3518 else { /* Can't use; try to release */
3434 if (tbase == CMFAIL) { /* Cope with partial failure */ 3519 end = (char *) CALL_MORECORE (-asize);
3435 if (br != CMFAIL) { /* Try to use/extend the space we did get */ 3520 br = CMFAIL;
3436 if (asize < HALF_MAX_SIZE_T && 3521 }
3437 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { 3522 }
3438 size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); 3523 }
3439 if (esize < HALF_MAX_SIZE_T) {
3440 char* end = (char*)CALL_MORECORE(esize);
3441 if (end != CMFAIL)
3442 asize += esize;
3443 else { /* Can't use; try to release */
3444 end = (char*)CALL_MORECORE(-asize);
3445 br = CMFAIL;
3446 } 3524 }
3447 } 3525 if (br != CMFAIL) { /* Use the space we did get */
3526 tbase = br;
3527 tsize = asize;
3528 } else
3529 disable_contiguous (m); /* Don't try contiguous path in the future */
3448 } 3530 }
3449 } 3531
3450 if (br != CMFAIL) { /* Use the space we did get */ 3532 RELEASE_MORECORE_LOCK ();
3451 tbase = br; 3533 }
3452 tsize = asize; 3534
3453 } 3535 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3454 else 3536 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
3455 disable_contiguous(m); /* Don't try contiguous path in the future */ 3537 size_t rsize = granularity_align (req);
3456 } 3538 if (rsize > nb) { /* Fail if wraps around zero */
3457 3539 char *mp = (char *) (CALL_MMAP (rsize));
3458 RELEASE_MORECORE_LOCK(); 3540 if (mp != CMFAIL) {
3459 } 3541 tbase = mp;
3460 3542 tsize = rsize;
3461 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ 3543 mmap_flag = IS_MMAPPED_BIT;
3462 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; 3544 }
3463 size_t rsize = granularity_align(req);
3464 if (rsize > nb) { /* Fail if wraps around zero */
3465 char* mp = (char*)(CALL_MMAP(rsize));
3466 if (mp != CMFAIL) {
3467 tbase = mp;
3468 tsize = rsize;
3469 mmap_flag = IS_MMAPPED_BIT;
3470 }
3471 }
3472 }
3473
3474 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3475 size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3476 if (asize < HALF_MAX_SIZE_T) {
3477 char* br = CMFAIL;
3478 char* end = CMFAIL;
3479 ACQUIRE_MORECORE_LOCK();
3480 br = (char*)(CALL_MORECORE(asize));
3481 end = (char*)(CALL_MORECORE(0));
3482 RELEASE_MORECORE_LOCK();
3483 if (br != CMFAIL && end != CMFAIL && br < end) {
3484 size_t ssize = end - br;
3485 if (ssize > nb + TOP_FOOT_SIZE) {
3486 tbase = br;
3487 tsize = ssize;
3488 } 3545 }
3489 } 3546 }
3490 } 3547
3491 } 3548 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3492 3549 size_t asize = granularity_align (nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3493 if (tbase != CMFAIL) { 3550 if (asize < HALF_MAX_SIZE_T) {
3494 3551 char *br = CMFAIL;
3495 if ((m->footprint += tsize) > m->max_footprint) 3552 char *end = CMFAIL;
3496 m->max_footprint = m->footprint; 3553 ACQUIRE_MORECORE_LOCK ();
3497 3554 br = (char *) (CALL_MORECORE (asize));
3498 if (!is_initialized(m)) { /* first-time initialization */ 3555 end = (char *) (CALL_MORECORE (0));
3499 m->seg.base = m->least_addr = tbase; 3556 RELEASE_MORECORE_LOCK ();
3500 m->seg.size = tsize; 3557 if (br != CMFAIL && end != CMFAIL && br < end) {
3501 m->seg.sflags = mmap_flag; 3558 size_t ssize = end - br;
3502 m->magic = mparams.magic; 3559 if (ssize > nb + TOP_FOOT_SIZE) {
3503 init_bins(m); 3560 tbase = br;
3504 if (is_global(m)) 3561 tsize = ssize;
3505 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); 3562 }
3506 else { 3563 }
3507 /* Offset top by embedded malloc_state */
3508 mchunkptr mn = next_chunk(mem2chunk(m));
3509 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
3510 }
3511 }
3512
3513 else {
3514 /* Try to merge with an existing segment */
3515 msegmentptr sp = &m->seg;
3516 while (sp != 0 && tbase != sp->base + sp->size)
3517 sp = sp->next;
3518 if (sp != 0 &&
3519 !is_extern_segment(sp) &&
3520 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
3521 segment_holds(sp, m->top)) { /* append */
3522 sp->size += tsize;
3523 init_top(m, m->top, m->topsize + tsize);
3524 }
3525 else {
3526 if (tbase < m->least_addr)
3527 m->least_addr = tbase;
3528 sp = &m->seg;
3529 while (sp != 0 && sp->base != tbase + tsize)
3530 sp = sp->next;
3531 if (sp != 0 &&
3532 !is_extern_segment(sp) &&
3533 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3534 char* oldbase = sp->base;
3535 sp->base = tbase;
3536 sp->size += tsize;
3537 return prepend_alloc(m, tbase, oldbase, nb);
3538 } 3564 }
3539 else 3565 }
3540 add_segment(m, tbase, tsize, mmap_flag); 3566
3541 } 3567 if (tbase != CMFAIL) {
3542 } 3568
3543 3569 if ((m->footprint += tsize) > m->max_footprint)
3544 if (nb < m->topsize) { /* Allocate from new or extended top space */ 3570 m->max_footprint = m->footprint;
3545 size_t rsize = m->topsize -= nb; 3571
3546 mchunkptr p = m->top; 3572 if (!is_initialized (m)) { /* first-time initialization */
3547 mchunkptr r = m->top = chunk_plus_offset(p, nb); 3573 m->seg.base = m->least_addr = tbase;
3548 r->head = rsize | PINUSE_BIT; 3574 m->seg.size = tsize;
3549 set_size_and_pinuse_of_inuse_chunk(m, p, nb); 3575 m->seg.sflags = mmap_flag;
3550 check_top_chunk(m, m->top); 3576 m->magic = mparams.magic;
3551 check_malloced_chunk(m, chunk2mem(p), nb); 3577 init_bins (m);
3552 return chunk2mem(p); 3578 if (is_global (m))
3553 } 3579 init_top (m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3554 } 3580 else {
3555 3581 /* Offset top by embedded malloc_state */
3556 MALLOC_FAILURE_ACTION; 3582 mchunkptr mn = next_chunk (mem2chunk (m));
3557 return 0; 3583 init_top (m, mn,
3558 } 3584 (size_t) ((tbase + tsize) - (char *) mn) -
3559 3585 TOP_FOOT_SIZE);
3560 /* ----------------------- system deallocation -------------------------- */ 3586 }
3561
3562 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3563 static size_t release_unused_segments(mstate m) {
3564 size_t released = 0;
3565 msegmentptr pred = &m->seg;
3566 msegmentptr sp = pred->next;
3567 while (sp != 0) {
3568 char* base = sp->base;
3569 size_t size = sp->size;
3570 msegmentptr next = sp->next;
3571 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3572 mchunkptr p = align_as_chunk(base);
3573 size_t psize = chunksize(p);
3574 /* Can unmap if first chunk holds entire segment and not pinned */
3575 if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
3576 tchunkptr tp = (tchunkptr)p;
3577 assert(segment_holds(sp, (char*)sp));
3578 if (p == m->dv) {
3579 m->dv = 0;
3580 m->dvsize = 0;
3581 } 3587 }
3588
3582 else { 3589 else {
3583 unlink_large_chunk(m, tp); 3590 /* Try to merge with an existing segment */
3591 msegmentptr sp = &m->seg;
3592 while (sp != 0 && tbase != sp->base + sp->size)
3593 sp = sp->next;
3594 if (sp != 0 && !is_extern_segment (sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds (sp, m->top)) { /* append */
3595 sp->size += tsize;
3596 init_top (m, m->top, m->topsize + tsize);
3597 } else {
3598 if (tbase < m->least_addr)
3599 m->least_addr = tbase;
3600 sp = &m->seg;
3601 while (sp != 0 && sp->base != tbase + tsize)
3602 sp = sp->next;
3603 if (sp != 0 &&
3604 !is_extern_segment (sp) &&
3605 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3606 char *oldbase = sp->base;
3607 sp->base = tbase;
3608 sp->size += tsize;
3609 return prepend_alloc (m, tbase, oldbase, nb);
3610 } else
3611 add_segment (m, tbase, tsize, mmap_flag);
3612 }
3584 } 3613 }
3585 if (CALL_MUNMAP(base, size) == 0) { 3614
3586 released += size; 3615 if (nb < m->topsize) { /* Allocate from new or extended top space */
3587 m->footprint -= size; 3616 size_t rsize = m->topsize -= nb;
3588 /* unlink obsoleted record */ 3617 mchunkptr p = m->top;
3589 sp = pred; 3618 mchunkptr r = m->top = chunk_plus_offset (p, nb);
3590 sp->next = next; 3619 r->head = rsize | PINUSE_BIT;
3620 set_size_and_pinuse_of_inuse_chunk (m, p, nb);
3621 check_top_chunk (m, m->top);
3622 check_malloced_chunk (m, chunk2mem (p), nb);
3623 return chunk2mem (p);
3591 } 3624 }
3592 else { /* back out if cannot unmap */ 3625 }
3593 insert_large_chunk(m, tp, psize); 3626
3594 }
3595 }
3596 }
3597 pred = sp;
3598 sp = next;
3599 }
3600 return released;
3601 }
3602
3603 static int sys_trim(mstate m, size_t pad) {
3604 size_t released = 0;
3605 if (pad < MAX_REQUEST && is_initialized(m)) {
3606 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3607
3608 if (m->topsize > pad) {
3609 /* Shrink top space in granularity-size units, keeping at least one */
3610 size_t unit = mparams.granularity;
3611 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3612 SIZE_T_ONE) * unit;
3613 msegmentptr sp = segment_holding(m, (char*)m->top);
3614
3615 if (!is_extern_segment(sp)) {
3616 if (is_mmapped_segment(sp)) {
3617 if (HAVE_MMAP &&
3618 sp->size >= extra &&
3619 !has_segment_link(m, sp)) { /* can't shrink if pinned */
3620 size_t newsize = sp->size - extra;
3621 /* Prefer mremap, fall back to munmap */
3622 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
3623 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3624 released = extra;
3625 }
3626 }
3627 }
3628 else if (HAVE_MORECORE) {
3629 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3630 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3631 ACQUIRE_MORECORE_LOCK();
3632 {
3633 /* Make sure end of memory is where we last set it. */
3634 char* old_br = (char*)(CALL_MORECORE(0));
3635 if (old_br == sp->base + sp->size) {
3636 char* rel_br = (char*)(CALL_MORECORE(-extra));
3637 char* new_br = (char*)(CALL_MORECORE(0));
3638 if (rel_br != CMFAIL && new_br < old_br)
3639 released = old_br - new_br;
3640 }
3641 }
3642 RELEASE_MORECORE_LOCK();
3643 }
3644 }
3645
3646 if (released != 0) {
3647 sp->size -= released;
3648 m->footprint -= released;
3649 init_top(m, m->top, m->topsize - released);
3650 check_top_chunk(m, m->top);
3651 }
3652 }
3653
3654 /* Unmap any unused mmapped segments */
3655 if (HAVE_MMAP)
3656 released += release_unused_segments(m);
3657
3658 /* On failure, disable autotrim to avoid repeated failed future calls */
3659 if (released == 0)
3660 m->trim_check = MAX_SIZE_T;
3661 }
3662
3663 return (released != 0)? 1 : 0;
3664 }
3665
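[Editor's note, not part of the changeset] The shrink step in sys_trim above is easiest to follow with concrete numbers. The standalone sketch below assumes a 64 KB granularity and roughly 200 KB of top space and reproduces the extra computation: whole granularity units are released and the remainder stays in the top chunk (TOP_FOOT_SIZE and the pad argument are left at zero for simplicity).

#include <stddef.h>
#include <assert.h>

int main(void)
{
    size_t unit    = 64 * 1024;      /* assumed mparams.granularity */
    size_t topsize = 200 * 1024;     /* assumed current size of top */
    size_t pad     = 0;              /* caller slack, kept at zero here */
    /* same expression as in sys_trim */
    size_t extra = ((topsize - pad + (unit - 1)) / unit - 1) * unit;
    assert(extra == 192 * 1024);             /* three whole units released */
    assert(topsize - extra == 8 * 1024);     /* 8 KB remain in top */
    return 0;
}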
3666 /* ---------------------------- malloc support --------------------------- */
3667
3668 /* allocate a large request from the best fitting chunk in a treebin */
3669 static void* tmalloc_large(mstate m, size_t nb) {
3670 tchunkptr v = 0;
3671 size_t rsize = -nb; /* Unsigned negation */
3672 tchunkptr t;
3673 bindex_t idx;
3674 compute_tree_index(nb, idx);
3675
3676 if ((t = *treebin_at(m, idx)) != 0) {
3677 /* Traverse tree for this bin looking for node with size == nb */
3678 size_t sizebits = nb << leftshift_for_tree_index(idx);
3679 tchunkptr rst = 0; /* The deepest untaken right subtree */
3680 for (;;) {
3681 tchunkptr rt;
3682 size_t trem = chunksize(t) - nb;
3683 if (trem < rsize) {
3684 v = t;
3685 if ((rsize = trem) == 0)
3686 break;
3687 }
3688 rt = t->child[1];
3689 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3690 if (rt != 0 && rt != t)
3691 rst = rt;
3692 if (t == 0) {
3693 t = rst; /* set t to least subtree holding sizes > nb */
3694 break;
3695 }
3696 sizebits <<= 1;
3697 }
3698 }
3699
3700 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3701 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3702 if (leftbits != 0) {
3703 bindex_t i;
3704 binmap_t leastbit = least_bit(leftbits);
3705 compute_bit2idx(leastbit, i);
3706 t = *treebin_at(m, i);
3707 }
3708 }
3709
3710 while (t != 0) { /* find smallest of tree or subtree */
3711 size_t trem = chunksize(t) - nb;
3712 if (trem < rsize) {
3713 rsize = trem;
3714 v = t;
3715 }
3716 t = leftmost_child(t);
3717 }
3718
3719 /* If dv is a better fit, return 0 so malloc will use it */
3720 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3721 if (RTCHECK(ok_address(m, v))) { /* split */
3722 mchunkptr r = chunk_plus_offset(v, nb);
3723 assert(chunksize(v) == rsize + nb);
3724 if (RTCHECK(ok_next(v, r))) {
3725 unlink_large_chunk(m, v);
3726 if (rsize < MIN_CHUNK_SIZE)
3727 set_inuse_and_pinuse(m, v, (rsize + nb));
3728 else {
3729 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3730 set_size_and_pinuse_of_free_chunk(r, rsize);
3731 insert_chunk(m, r, rsize);
3732 }
3733 return chunk2mem(v);
3734 }
3735 }
3736 CORRUPTION_ERROR_ACTION(m);
3737 }
3738 return 0;
3739 }
3740
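[Editor's note, not part of the changeset] The child-selection expression in the tmalloc_large loop above walks the bits of the request size from the most significant end downward. This small standalone check shows that the shift simply reads the current top bit of sizebits: a 1 descends into child[1], a 0 into child[0], and shifting sizebits left by one advances to the next tree level.

#include <stddef.h>
#include <assert.h>

#define SIZE_T_BITSIZE (sizeof(size_t) * 8)   /* stand-in for the real macro */

int main(void)
{
    size_t sizebits = (size_t)1 << (SIZE_T_BITSIZE - 1);     /* top bit set */
    assert(((sizebits >> (SIZE_T_BITSIZE - 1)) & 1) == 1);   /* take child[1] */
    sizebits <<= 1;                                          /* next level   */
    assert(((sizebits >> (SIZE_T_BITSIZE - 1)) & 1) == 0);   /* take child[0] */
    return 0;
}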
3741 /* allocate a small request from the best fitting chunk in a treebin */
3742 static void* tmalloc_small(mstate m, size_t nb) {
3743 tchunkptr t, v;
3744 size_t rsize;
3745 bindex_t i;
3746 binmap_t leastbit = least_bit(m->treemap);
3747 compute_bit2idx(leastbit, i);
3748
3749 v = t = *treebin_at(m, i);
3750 rsize = chunksize(t) - nb;
3751
3752 while ((t = leftmost_child(t)) != 0) {
3753 size_t trem = chunksize(t) - nb;
3754 if (trem < rsize) {
3755 rsize = trem;
3756 v = t;
3757 }
3758 }
3759
3760 if (RTCHECK(ok_address(m, v))) {
3761 mchunkptr r = chunk_plus_offset(v, nb);
3762 assert(chunksize(v) == rsize + nb);
3763 if (RTCHECK(ok_next(v, r))) {
3764 unlink_large_chunk(m, v);
3765 if (rsize < MIN_CHUNK_SIZE)
3766 set_inuse_and_pinuse(m, v, (rsize + nb));
3767 else {
3768 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3769 set_size_and_pinuse_of_free_chunk(r, rsize);
3770 replace_dv(m, r, rsize);
3771 }
3772 return chunk2mem(v);
3773 }
3774 }
3775
3776 CORRUPTION_ERROR_ACTION(m);
3777 return 0;
3778 }
3779
3780 /* --------------------------- realloc support --------------------------- */
3781
3782 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
3783 if (bytes >= MAX_REQUEST) {
3784 MALLOC_FAILURE_ACTION; 3627 MALLOC_FAILURE_ACTION;
3785 return 0; 3628 return 0;
3786 } 3629 }
3787 if (!PREACTION(m)) { 3630
3788 mchunkptr oldp = mem2chunk(oldmem); 3631 /* ----------------------- system deallocation -------------------------- */
3789 size_t oldsize = chunksize(oldp); 3632
3790 mchunkptr next = chunk_plus_offset(oldp, oldsize); 3633 /* Unmap and unlink any mmapped segments that don't contain used chunks */
3791 mchunkptr newp = 0; 3634 static size_t
3792 void* extra = 0; 3635 release_unused_segments (mstate m)
3793 3636 {
3794 /* Try to either shrink or extend into top. Else malloc-copy-free */ 3637 size_t released = 0;
3795 3638 msegmentptr pred = &m->seg;
3796 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && 3639 msegmentptr sp = pred->next;
3797 ok_next(oldp, next) && ok_pinuse(next))) { 3640 while (sp != 0) {
3798 size_t nb = request2size(bytes); 3641 char *base = sp->base;
3799 if (is_mmapped(oldp)) 3642 size_t size = sp->size;
3800 newp = mmap_resize(m, oldp, nb); 3643 msegmentptr next = sp->next;
3801 else if (oldsize >= nb) { /* already big enough */ 3644 if (is_mmapped_segment (sp) && !is_extern_segment (sp)) {
3802 size_t rsize = oldsize - nb; 3645 mchunkptr p = align_as_chunk (base);
3803 newp = oldp; 3646 size_t psize = chunksize (p);
3804 if (rsize >= MIN_CHUNK_SIZE) { 3647 /* Can unmap if first chunk holds entire segment and not pinned */
3805 mchunkptr remainder = chunk_plus_offset(newp, nb); 3648 if (!cinuse (p)
3806 set_inuse(m, newp, nb); 3649 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3807 set_inuse(m, remainder, rsize); 3650 tchunkptr tp = (tchunkptr) p;
3808 extra = chunk2mem(remainder); 3651 assert (segment_holds (sp, (char *) sp));
3652 if (p == m->dv) {
3653 m->dv = 0;
3654 m->dvsize = 0;
3655 } else {
3656 unlink_large_chunk (m, tp);
3657 }
3658 if (CALL_MUNMAP (base, size) == 0) {
3659 released += size;
3660 m->footprint -= size;
3661 /* unlink obsoleted record */
3662 sp = pred;
3663 sp->next = next;
3664 } else { /* back out if cannot unmap */
3665 insert_large_chunk (m, tp, psize);
3666 }
3667 }
3809 } 3668 }
3810 } 3669 pred = sp;
3811 else if (next == m->top && oldsize + m->topsize > nb) { 3670 sp = next;
3812 /* Expand into top */ 3671 }
3813 size_t newsize = oldsize + m->topsize; 3672 return released;
3814 size_t newtopsize = newsize - nb; 3673 }
3815 mchunkptr newtop = chunk_plus_offset(oldp, nb); 3674
3816 set_inuse(m, oldp, nb); 3675 static int
3817 newtop->head = newtopsize |PINUSE_BIT; 3676 sys_trim (mstate m, size_t pad)
3818 m->top = newtop; 3677 {
3819 m->topsize = newtopsize; 3678 size_t released = 0;
3820 newp = oldp; 3679 if (pad < MAX_REQUEST && is_initialized (m)) {
3821 } 3680 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3822 } 3681
3823 else { 3682 if (m->topsize > pad) {
3824 USAGE_ERROR_ACTION(m, oldmem); 3683 /* Shrink top space in granularity-size units, keeping at least one */
3825 POSTACTION(m); 3684 size_t unit = mparams.granularity;
3826 return 0; 3685 size_t extra =
3827 } 3686 ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3828 3687 SIZE_T_ONE) * unit;
3829 POSTACTION(m); 3688 msegmentptr sp = segment_holding (m, (char *) m->top);
3830 3689
3831 if (newp != 0) { 3690 if (!is_extern_segment (sp)) {
3832 if (extra != 0) { 3691 if (is_mmapped_segment (sp)) {
3833 internal_free(m, extra); 3692 if (HAVE_MMAP && sp->size >= extra && !has_segment_link (m, sp)) { /* can't shrink if pinned */
3834 } 3693 size_t newsize = sp->size - extra;
3835 check_inuse_chunk(m, newp); 3694 /* Prefer mremap, fall back to munmap */
3836 return chunk2mem(newp); 3695 if ((CALL_MREMAP
3837 } 3696 (sp->base, sp->size, newsize, 0) != MFAIL)
3838 else { 3697 || (CALL_MUNMAP (sp->base + newsize, extra)
3839 void* newmem = internal_malloc(m, bytes); 3698 == 0)) {
3840 if (newmem != 0) { 3699 released = extra;
3841 size_t oc = oldsize - overhead_for(oldp); 3700 }
3842 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); 3701 }
3843 internal_free(m, oldmem); 3702 } else if (HAVE_MORECORE) {
3844 } 3703 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3845 return newmem; 3704 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3846 } 3705 ACQUIRE_MORECORE_LOCK ();
3847 } 3706 {
3848 return 0; 3707 /* Make sure end of memory is where we last set it. */
3708 char *old_br = (char *) (CALL_MORECORE (0));
3709 if (old_br == sp->base + sp->size) {
3710 char *rel_br = (char *) (CALL_MORECORE (-extra));
3711 char *new_br = (char *) (CALL_MORECORE (0));
3712 if (rel_br != CMFAIL && new_br < old_br)
3713 released = old_br - new_br;
3714 }
3715 }
3716 RELEASE_MORECORE_LOCK ();
3717 }
3718 }
3719
3720 if (released != 0) {
3721 sp->size -= released;
3722 m->footprint -= released;
3723 init_top (m, m->top, m->topsize - released);
3724 check_top_chunk (m, m->top);
3725 }
3726 }
3727
3728 /* Unmap any unused mmapped segments */
3729 if (HAVE_MMAP)
3730 released += release_unused_segments (m);
3731
3732 /* On failure, disable autotrim to avoid repeated failed future calls */
3733 if (released == 0)
3734 m->trim_check = MAX_SIZE_T;
3735 }
3736
3737 return (released != 0) ? 1 : 0;
3738 }
3739
3740 /* ---------------------------- malloc support --------------------------- */
3741
3742 /* allocate a large request from the best fitting chunk in a treebin */
3743 static void *
3744 tmalloc_large (mstate m, size_t nb)
3745 {
3746 tchunkptr v = 0;
3747 size_t rsize = -nb; /* Unsigned negation */
3748 tchunkptr t;
3749 bindex_t idx;
3750 compute_tree_index (nb, idx);
3751
3752 if ((t = *treebin_at (m, idx)) != 0) {
3753 /* Traverse tree for this bin looking for node with size == nb */
3754 size_t sizebits = nb << leftshift_for_tree_index (idx);
3755 tchunkptr rst = 0; /* The deepest untaken right subtree */
3756 for (;;) {
3757 tchunkptr rt;
3758 size_t trem = chunksize (t) - nb;
3759 if (trem < rsize) {
3760 v = t;
3761 if ((rsize = trem) == 0)
3762 break;
3763 }
3764 rt = t->child[1];
3765 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3766 if (rt != 0 && rt != t)
3767 rst = rt;
3768 if (t == 0) {
3769 t = rst; /* set t to least subtree holding sizes > nb */
3770 break;
3771 }
3772 sizebits <<= 1;
3773 }
3774 }
3775
3776 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3777 binmap_t leftbits = left_bits (idx2bit (idx)) & m->treemap;
3778 if (leftbits != 0) {
3779 bindex_t i;
3780 binmap_t leastbit = least_bit (leftbits);
3781 compute_bit2idx (leastbit, i);
3782 t = *treebin_at (m, i);
3783 }
3784 }
3785
3786 while (t != 0) { /* find smallest of tree or subtree */
3787 size_t trem = chunksize (t) - nb;
3788 if (trem < rsize) {
3789 rsize = trem;
3790 v = t;
3791 }
3792 t = leftmost_child (t);
3793 }
3794
3795 /* If dv is a better fit, return 0 so malloc will use it */
3796 if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3797 if (RTCHECK (ok_address (m, v))) { /* split */
3798 mchunkptr r = chunk_plus_offset (v, nb);
3799 assert (chunksize (v) == rsize + nb);
3800 if (RTCHECK (ok_next (v, r))) {
3801 unlink_large_chunk (m, v);
3802 if (rsize < MIN_CHUNK_SIZE)
3803 set_inuse_and_pinuse (m, v, (rsize + nb));
3804 else {
3805 set_size_and_pinuse_of_inuse_chunk (m, v, nb);
3806 set_size_and_pinuse_of_free_chunk (r, rsize);
3807 insert_chunk (m, r, rsize);
3808 }
3809 return chunk2mem (v);
3810 }
3811 }
3812 CORRUPTION_ERROR_ACTION (m);
3813 }
3814 return 0;
3815 }
3816
3817 /* allocate a small request from the best fitting chunk in a treebin */
3818 static void *
3819 tmalloc_small (mstate m, size_t nb)
3820 {
3821 tchunkptr t, v;
3822 size_t rsize;
3823 bindex_t i;
3824 binmap_t leastbit = least_bit (m->treemap);
3825 compute_bit2idx (leastbit, i);
3826
3827 v = t = *treebin_at (m, i);
3828 rsize = chunksize (t) - nb;
3829
3830 while ((t = leftmost_child (t)) != 0) {
3831 size_t trem = chunksize (t) - nb;
3832 if (trem < rsize) {
3833 rsize = trem;
3834 v = t;
3835 }
3836 }
3837
3838 if (RTCHECK (ok_address (m, v))) {
3839 mchunkptr r = chunk_plus_offset (v, nb);
3840 assert (chunksize (v) == rsize + nb);
3841 if (RTCHECK (ok_next (v, r))) {
3842 unlink_large_chunk (m, v);
3843 if (rsize < MIN_CHUNK_SIZE)
3844 set_inuse_and_pinuse (m, v, (rsize + nb));
3845 else {
3846 set_size_and_pinuse_of_inuse_chunk (m, v, nb);
3847 set_size_and_pinuse_of_free_chunk (r, rsize);
3848 replace_dv (m, r, rsize);
3849 }
3850 return chunk2mem (v);
3851 }
3852 }
3853
3854 CORRUPTION_ERROR_ACTION (m);
3855 return 0;
3856 }
3857
3858 /* --------------------------- realloc support --------------------------- */
3859
3860 static void *
3861 internal_realloc (mstate m, void *oldmem, size_t bytes)
3862 {
3863 if (bytes >= MAX_REQUEST) {
3864 MALLOC_FAILURE_ACTION;
3865 return 0;
3866 }
3867 if (!PREACTION (m)) {
3868 mchunkptr oldp = mem2chunk (oldmem);
3869 size_t oldsize = chunksize (oldp);
3870 mchunkptr next = chunk_plus_offset (oldp, oldsize);
3871 mchunkptr newp = 0;
3872 void *extra = 0;
3873
3874 /* Try to either shrink or extend into top. Else malloc-copy-free */
3875
3876 if (RTCHECK (ok_address (m, oldp) && ok_cinuse (oldp) &&
3877 ok_next (oldp, next) && ok_pinuse (next))) {
3878 size_t nb = request2size (bytes);
3879 if (is_mmapped (oldp))
3880 newp = mmap_resize (m, oldp, nb);
3881 else if (oldsize >= nb) { /* already big enough */
3882 size_t rsize = oldsize - nb;
3883 newp = oldp;
3884 if (rsize >= MIN_CHUNK_SIZE) {
3885 mchunkptr remainder = chunk_plus_offset (newp, nb);
3886 set_inuse (m, newp, nb);
3887 set_inuse (m, remainder, rsize);
3888 extra = chunk2mem (remainder);
3889 }
3890 } else if (next == m->top && oldsize + m->topsize > nb) {
3891 /* Expand into top */
3892 size_t newsize = oldsize + m->topsize;
3893 size_t newtopsize = newsize - nb;
3894 mchunkptr newtop = chunk_plus_offset (oldp, nb);
3895 set_inuse (m, oldp, nb);
3896 newtop->head = newtopsize | PINUSE_BIT;
3897 m->top = newtop;
3898 m->topsize = newtopsize;
3899 newp = oldp;
3900 }
3901 } else {
3902 USAGE_ERROR_ACTION (m, oldmem);
3903 POSTACTION (m);
3904 return 0;
3905 }
3906
3907 POSTACTION (m);
3908
3909 if (newp != 0) {
3910 if (extra != 0) {
3911 internal_free (m, extra);
3912 }
3913 check_inuse_chunk (m, newp);
3914 return chunk2mem (newp);
3915 } else {
3916 void *newmem = internal_malloc (m, bytes);
3917 if (newmem != 0) {
3918 size_t oc = oldsize - overhead_for (oldp);
3919 memcpy (newmem, oldmem, (oc < bytes) ? oc : bytes);
3920 internal_free (m, oldmem);
3921 }
3922 return newmem;
3923 }
3924 }
3925 return 0;
3849 } 3926 }
3850 3927
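[Editor's note, not part of the changeset] A minimal usage sketch of the public entry point that wraps internal_realloc; dlmalloc, dlrealloc and dlfree are all defined further down in this file.

#include <stddef.h>

void *dlmalloc(size_t bytes);                 /* declared as defined below */
void *dlrealloc(void *oldmem, size_t bytes);
void  dlfree(void *mem);

static void grow_buffer(void)
{
    char *buf = (char *) dlmalloc(64);
    char *bigger;
    if (buf == 0)
        return;
    /* grows in place when the old chunk borders top and top has room;
       otherwise internal_realloc falls back to malloc, a copy of the
       old payload, and free of the old block */
    bigger = (char *) dlrealloc(buf, 4096);
    if (bigger != 0)
        buf = bigger;
    dlfree(buf);
}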
3851 /* --------------------------- memalign support -------------------------- */ 3928 /* --------------------------- memalign support -------------------------- */
3852 3929
3853 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { 3930 static void *
3854 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ 3931 internal_memalign (mstate m, size_t alignment, size_t bytes)
3855 return internal_malloc(m, bytes); 3932 {
3856 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ 3933 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
3857 alignment = MIN_CHUNK_SIZE; 3934 return internal_malloc (m, bytes);
3858 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ 3935 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3859 size_t a = MALLOC_ALIGNMENT << 1; 3936 alignment = MIN_CHUNK_SIZE;
3860 while (a < alignment) a <<= 1; 3937 if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
3861 alignment = a; 3938 size_t a = MALLOC_ALIGNMENT << 1;
3862 } 3939 while (a < alignment)
3863 3940 a <<= 1;
3864 if (bytes >= MAX_REQUEST - alignment) { 3941 alignment = a;
3865 if (m != 0) { /* Test isn't needed but avoids compiler warning */ 3942 }
3866 MALLOC_FAILURE_ACTION; 3943
3867 } 3944 if (bytes >= MAX_REQUEST - alignment) {
3868 } 3945 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3869 else { 3946 MALLOC_FAILURE_ACTION;
3870 size_t nb = request2size(bytes);
3871 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3872 char* mem = (char*)internal_malloc(m, req);
3873 if (mem != 0) {
3874 void* leader = 0;
3875 void* trailer = 0;
3876 mchunkptr p = mem2chunk(mem);
3877
3878 if (PREACTION(m)) return 0;
3879 if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
3880 /*
3881 Find an aligned spot inside chunk. Since we need to give
3882 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3883 the first calculation places us at a spot with less than
3884 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3885 We've allocated enough total room so that this is always
3886 possible.
3887 */
3888 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
3889 alignment -
3890 SIZE_T_ONE)) &
3891 -alignment));
3892 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3893 br : br+alignment;
3894 mchunkptr newp = (mchunkptr)pos;
3895 size_t leadsize = pos - (char*)(p);
3896 size_t newsize = chunksize(p) - leadsize;
3897
3898 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3899 newp->prev_foot = p->prev_foot + leadsize;
3900 newp->head = (newsize|CINUSE_BIT);
3901 } 3947 }
3902 else { /* Otherwise, give back leader, use the rest */ 3948 } else {
3903 set_inuse(m, newp, newsize); 3949 size_t nb = request2size (bytes);
3904 set_inuse(m, p, leadsize); 3950 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3905 leader = chunk2mem(p); 3951 char *mem = (char *) internal_malloc (m, req);
3952 if (mem != 0) {
3953 void *leader = 0;
3954 void *trailer = 0;
3955 mchunkptr p = mem2chunk (mem);
3956
3957 if (PREACTION (m))
3958 return 0;
3959 if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
3960 /*
3961 Find an aligned spot inside chunk. Since we need to give
3962 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3963 the first calculation places us at a spot with less than
3964 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3965 We've allocated enough total room so that this is always
3966 possible.
3967 */
3968 char *br = (char *)
3969 mem2chunk ((size_t)
3970 (((size_t)
3971 (mem + alignment -
3972 SIZE_T_ONE)) & -alignment));
3973 char *pos =
3974 ((size_t) (br - (char *) (p)) >=
3975 MIN_CHUNK_SIZE) ? br : br + alignment;
3976 mchunkptr newp = (mchunkptr) pos;
3977 size_t leadsize = pos - (char *) (p);
3978 size_t newsize = chunksize (p) - leadsize;
3979
3980 if (is_mmapped (p)) { /* For mmapped chunks, just adjust offset */
3981 newp->prev_foot = p->prev_foot + leadsize;
3982 newp->head = (newsize | CINUSE_BIT);
3983 } else { /* Otherwise, give back leader, use the rest */
3984 set_inuse (m, newp, newsize);
3985 set_inuse (m, p, leadsize);
3986 leader = chunk2mem (p);
3987 }
3988 p = newp;
3989 }
3990
3991 /* Give back spare room at the end */
3992 if (!is_mmapped (p)) {
3993 size_t size = chunksize (p);
3994 if (size > nb + MIN_CHUNK_SIZE) {
3995 size_t remainder_size = size - nb;
3996 mchunkptr remainder = chunk_plus_offset (p, nb);
3997 set_inuse (m, p, nb);
3998 set_inuse (m, remainder, remainder_size);
3999 trailer = chunk2mem (remainder);
4000 }
4001 }
4002
4003 assert (chunksize (p) >= nb);
4004 assert ((((size_t) (chunk2mem (p))) % alignment) == 0);
4005 check_inuse_chunk (m, p);
4006 POSTACTION (m);
4007 if (leader != 0) {
4008 internal_free (m, leader);
4009 }
4010 if (trailer != 0) {
4011 internal_free (m, trailer);
4012 }
4013 return chunk2mem (p);
3906 } 4014 }
3907 p = newp; 4015 }
3908 } 4016 return 0;
3909 4017 }
3910 /* Give back spare room at the end */ 4018
3911 if (!is_mmapped(p)) { 4019 /* ------------------------ comalloc/coalloc support --------------------- */
3912 size_t size = chunksize(p); 4020
3913 if (size > nb + MIN_CHUNK_SIZE) { 4021 static void **
3914 size_t remainder_size = size - nb; 4022 ialloc (mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
3915 mchunkptr remainder = chunk_plus_offset(p, nb); 4023 {
3916 set_inuse(m, p, nb); 4024 /*
3917 set_inuse(m, remainder, remainder_size); 4025 This provides common support for independent_X routines, handling
3918 trailer = chunk2mem(remainder); 4026 all of the combinations that can result.
4027
4028 The opts arg has:
4029 bit 0 set if all elements are same size (using sizes[0])
4030 bit 1 set if elements should be zeroed
4031 */
4032
4033 size_t element_size; /* chunksize of each element, if all same */
4034 size_t contents_size; /* total size of elements */
4035 size_t array_size; /* request size of pointer array */
4036 void *mem; /* malloced aggregate space */
4037 mchunkptr p; /* corresponding chunk */
4038 size_t remainder_size; /* remaining bytes while splitting */
4039 void **marray; /* either "chunks" or malloced ptr array */
4040 mchunkptr array_chunk; /* chunk for malloced ptr array */
4041 flag_t was_enabled; /* to disable mmap */
4042 size_t size;
4043 size_t i;
4044
4045 /* compute array length, if needed */
4046 if (chunks != 0) {
4047 if (n_elements == 0)
4048 return chunks; /* nothing to do */
4049 marray = chunks;
4050 array_size = 0;
4051 } else {
4052 /* if empty req, must still return chunk representing empty array */
4053 if (n_elements == 0)
4054 return (void **) internal_malloc (m, 0);
4055 marray = 0;
4056 array_size = request2size (n_elements * (sizeof (void *)));
4057 }
4058
4059 /* compute total element size */
4060 if (opts & 0x1) { /* all-same-size */
4061 element_size = request2size (*sizes);
4062 contents_size = n_elements * element_size;
4063 } else { /* add up all the sizes */
4064 element_size = 0;
4065 contents_size = 0;
4066 for (i = 0; i != n_elements; ++i)
4067 contents_size += request2size (sizes[i]);
4068 }
4069
4070 size = contents_size + array_size;
4071
4072 /*
4073 Allocate the aggregate chunk. First disable direct-mmapping so
4074 malloc won't use it, since we would not be able to later
4075 free/realloc space internal to a segregated mmap region.
4076 */
4077 was_enabled = use_mmap (m);
4078 disable_mmap (m);
4079 mem = internal_malloc (m, size - CHUNK_OVERHEAD);
4080 if (was_enabled)
4081 enable_mmap (m);
4082 if (mem == 0)
4083 return 0;
4084
4085 if (PREACTION (m))
4086 return 0;
4087 p = mem2chunk (mem);
4088 remainder_size = chunksize (p);
4089
4090 assert (!is_mmapped (p));
4091
4092 if (opts & 0x2) { /* optionally clear the elements */
4093 memset ((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4094 }
4095
4096 /* If not provided, allocate the pointer array as final part of chunk */
4097 if (marray == 0) {
4098 size_t array_chunk_size;
4099 array_chunk = chunk_plus_offset (p, contents_size);
4100 array_chunk_size = remainder_size - contents_size;
4101 marray = (void **) (chunk2mem (array_chunk));
4102 set_size_and_pinuse_of_inuse_chunk (m, array_chunk, array_chunk_size);
4103 remainder_size = contents_size;
4104 }
4105
4106 /* split out elements */
4107 for (i = 0;; ++i) {
4108 marray[i] = chunk2mem (p);
4109 if (i != n_elements - 1) {
4110 if (element_size != 0)
4111 size = element_size;
4112 else
4113 size = request2size (sizes[i]);
4114 remainder_size -= size;
4115 set_size_and_pinuse_of_inuse_chunk (m, p, size);
4116 p = chunk_plus_offset (p, size);
4117 } else { /* the final element absorbs any overallocation slop */
4118 set_size_and_pinuse_of_inuse_chunk (m, p, remainder_size);
4119 break;
3919 } 4120 }
3920 } 4121 }
3921 4122
3922 assert (chunksize(p) >= nb); 4123 #if DEBUG
3923 assert((((size_t)(chunk2mem(p))) % alignment) == 0); 4124 if (marray != chunks) {
3924 check_inuse_chunk(m, p); 4125 /* final element must have exactly exhausted chunk */
3925 POSTACTION(m); 4126 if (element_size != 0) {
3926 if (leader != 0) { 4127 assert (remainder_size == element_size);
3927 internal_free(m, leader); 4128 } else {
3928 } 4129 assert (remainder_size == request2size (sizes[i]));
3929 if (trailer != 0) { 4130 }
3930 internal_free(m, trailer); 4131 check_inuse_chunk (m, mem2chunk (marray));
3931 } 4132 }
3932 return chunk2mem(p);
3933 }
3934 }
3935 return 0;
3936 }
3937
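[Editor's note, not part of the changeset] A short usage sketch of dlmemalign, the public wrapper around internal_memalign defined further down in this file; the 64-byte alignment is an arbitrary example value.

#include <stddef.h>
#include <assert.h>

void *dlmemalign(size_t alignment, size_t bytes);   /* defined below */
void  dlfree(void *mem);

static void aligned_alloc_example(void)
{
    /* internal_memalign rounds a non-power-of-two alignment up to the
       next power of two and hands back any leading or trailing slack
       it carves off as separate free chunks */
    void *p = dlmemalign(64, 100);
    if (p != 0) {
        assert(((size_t) p % 64) == 0);
        dlfree(p);
    }
}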
3938 /* ------------------------ comalloc/coalloc support --------------------- */
3939
3940 static void** ialloc(mstate m,
3941 size_t n_elements,
3942 size_t* sizes,
3943 int opts,
3944 void* chunks[]) {
3945 /*
3946 This provides common support for independent_X routines, handling
3947 all of the combinations that can result.
3948
3949 The opts arg has:
3950 bit 0 set if all elements are same size (using sizes[0])
3951 bit 1 set if elements should be zeroed
3952 */
3953
3954 size_t element_size; /* chunksize of each element, if all same */
3955 size_t contents_size; /* total size of elements */
3956 size_t array_size; /* request size of pointer array */
3957 void* mem; /* malloced aggregate space */
3958 mchunkptr p; /* corresponding chunk */
3959 size_t remainder_size; /* remaining bytes while splitting */
3960 void** marray; /* either "chunks" or malloced ptr array */
3961 mchunkptr array_chunk; /* chunk for malloced ptr array */
3962 flag_t was_enabled; /* to disable mmap */
3963 size_t size;
3964 size_t i;
3965
3966 /* compute array length, if needed */
3967 if (chunks != 0) {
3968 if (n_elements == 0)
3969 return chunks; /* nothing to do */
3970 marray = chunks;
3971 array_size = 0;
3972 }
3973 else {
3974 /* if empty req, must still return chunk representing empty array */
3975 if (n_elements == 0)
3976 return (void**)internal_malloc(m, 0);
3977 marray = 0;
3978 array_size = request2size(n_elements * (sizeof(void*)));
3979 }
3980
3981 /* compute total element size */
3982 if (opts & 0x1) { /* all-same-size */
3983 element_size = request2size(*sizes);
3984 contents_size = n_elements * element_size;
3985 }
3986 else { /* add up all the sizes */
3987 element_size = 0;
3988 contents_size = 0;
3989 for (i = 0; i != n_elements; ++i) 4133 for (i = 0; i != n_elements; ++i)
3990 contents_size += request2size(sizes[i]); 4134 check_inuse_chunk (m, mem2chunk (marray[i]));
3991 }
3992
3993 size = contents_size + array_size;
3994
3995 /*
3996 Allocate the aggregate chunk. First disable direct-mmapping so
3997 malloc won't use it, since we would not be able to later
3998 free/realloc space internal to a segregated mmap region.
3999 */
4000 was_enabled = use_mmap(m);
4001 disable_mmap(m);
4002 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4003 if (was_enabled)
4004 enable_mmap(m);
4005 if (mem == 0)
4006 return 0;
4007
4008 if (PREACTION(m)) return 0;
4009 p = mem2chunk(mem);
4010 remainder_size = chunksize(p);
4011
4012 assert(!is_mmapped(p));
4013
4014 if (opts & 0x2) { /* optionally clear the elements */
4015 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4016 }
4017
4018 /* If not provided, allocate the pointer array as final part of chunk */
4019 if (marray == 0) {
4020 size_t array_chunk_size;
4021 array_chunk = chunk_plus_offset(p, contents_size);
4022 array_chunk_size = remainder_size - contents_size;
4023 marray = (void**) (chunk2mem(array_chunk));
4024 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4025 remainder_size = contents_size;
4026 }
4027
4028 /* split out elements */
4029 for (i = 0; ; ++i) {
4030 marray[i] = chunk2mem(p);
4031 if (i != n_elements-1) {
4032 if (element_size != 0)
4033 size = element_size;
4034 else
4035 size = request2size(sizes[i]);
4036 remainder_size -= size;
4037 set_size_and_pinuse_of_inuse_chunk(m, p, size);
4038 p = chunk_plus_offset(p, size);
4039 }
4040 else { /* the final element absorbs any overallocation slop */
4041 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4042 break;
4043 }
4044 }
4045
4046 #if DEBUG
4047 if (marray != chunks) {
4048 /* final element must have exactly exhausted chunk */
4049 if (element_size != 0) {
4050 assert(remainder_size == element_size);
4051 }
4052 else {
4053 assert(remainder_size == request2size(sizes[i]));
4054 }
4055 check_inuse_chunk(m, mem2chunk(marray));
4056 }
4057 for (i = 0; i != n_elements; ++i)
4058 check_inuse_chunk(m, mem2chunk(marray[i]));
4059 4135
4060 #endif /* DEBUG */ 4136 #endif /* DEBUG */
4061 4137
4062 POSTACTION(m); 4138 POSTACTION (m);
4063 return marray; 4139 return marray;
4064 } 4140 }
4065 4141
4066 4142
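[Editor's note, not part of the changeset] A usage sketch of dlindependent_calloc, the public wrapper defined further down that calls ialloc with opts bits 0 and 1 both set; struct node is a hypothetical element type used only for illustration.

#include <stddef.h>

void **dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void *chunks[]);        /* defined below */
void   dlfree(void *mem);

struct node { int key; struct node *next; };        /* hypothetical payload */

static void pool_example(void)
{
    void *slots[8];
    /* eight zeroed, equal-sized elements carved out of one underlying
       chunk; because the slots array is supplied, it is also what the
       call returns */
    void **elems = dlindependent_calloc(8, sizeof(struct node), slots);
    if (elems != 0) {
        size_t i;
        for (i = 0; i != 8; ++i)
            dlfree(elems[i]);   /* each element is an ordinary in-use chunk */
    }
}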
4067 /* -------------------------- public routines ---------------------------- */ 4143 /* -------------------------- public routines ---------------------------- */
4068 4144
4069 #if !ONLY_MSPACES 4145 #if !ONLY_MSPACES
4070 4146
4071 void* dlmalloc(size_t bytes) { 4147 void *
4072 /* 4148 dlmalloc (size_t bytes)
4073 Basic algorithm: 4149 {
4074 If a small request (< 256 bytes minus per-chunk overhead): 4150 /*
4151 Basic algorithm:
4152 If a small request (< 256 bytes minus per-chunk overhead):
4075 1. If one exists, use a remainderless chunk in associated smallbin. 4153 1. If one exists, use a remainderless chunk in associated smallbin.
4076 (Remainderless means that there are too few excess bytes to 4154 (Remainderless means that there are too few excess bytes to
4077 represent as a chunk.) 4155 represent as a chunk.)
4078 2. If it is big enough, use the dv chunk, which is normally the 4156 2. If it is big enough, use the dv chunk, which is normally the
4079 chunk adjacent to the one used for the most recent small request. 4157 chunk adjacent to the one used for the most recent small request.
4080 3. If one exists, split the smallest available chunk in a bin, 4158 3. If one exists, split the smallest available chunk in a bin,
4081 saving remainder in dv. 4159 saving remainder in dv.
4082 4. If it is big enough, use the top chunk. 4160 4. If it is big enough, use the top chunk.
4083 5. If available, get memory from system and use it 4161 5. If available, get memory from system and use it
4084 Otherwise, for a large request: 4162 Otherwise, for a large request:
4085 1. Find the smallest available binned chunk that fits, and use it 4163 1. Find the smallest available binned chunk that fits, and use it
4086 if it is better fitting than dv chunk, splitting if necessary. 4164 if it is better fitting than dv chunk, splitting if necessary.
4087 2. If better fitting than any binned chunk, use the dv chunk. 4165 2. If better fitting than any binned chunk, use the dv chunk.
4088 3. If it is big enough, use the top chunk. 4166 3. If it is big enough, use the top chunk.
4089 4. If request size >= mmap threshold, try to directly mmap this chunk. 4167 4. If request size >= mmap threshold, try to directly mmap this chunk.
4090 5. If available, get memory from system and use it 4168 5. If available, get memory from system and use it
4091 4169
4092 The ugly gotos here ensure that postaction occurs along all paths. 4170 The ugly gotos here ensure that postaction occurs along all paths.
4093 */ 4171 */
4094 4172
4095 if (!PREACTION(gm)) { 4173 if (!PREACTION (gm)) {
4096 void* mem; 4174 void *mem;
4097 size_t nb; 4175 size_t nb;
4098 if (bytes <= MAX_SMALL_REQUEST) { 4176 if (bytes <= MAX_SMALL_REQUEST) {
4099 bindex_t idx; 4177 bindex_t idx;
4100 binmap_t smallbits; 4178 binmap_t smallbits;
4101 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); 4179 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
4102 idx = small_index(nb); 4180 idx = small_index (nb);
4103 smallbits = gm->smallmap >> idx; 4181 smallbits = gm->smallmap >> idx;
4104 4182
4105 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ 4183 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4106 mchunkptr b, p; 4184 mchunkptr b, p;
4107 idx += ~smallbits & 1; /* Uses next bin if idx empty */ 4185 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4108 b = smallbin_at(gm, idx); 4186 b = smallbin_at (gm, idx);
4109 p = b->fd; 4187 p = b->fd;
4110 assert(chunksize(p) == small_index2size(idx)); 4188 assert (chunksize (p) == small_index2size (idx));
4111 unlink_first_small_chunk(gm, b, p, idx); 4189 unlink_first_small_chunk (gm, b, p, idx);
4112 set_inuse_and_pinuse(gm, p, small_index2size(idx)); 4190 set_inuse_and_pinuse (gm, p, small_index2size (idx));
4113 mem = chunk2mem(p); 4191 mem = chunk2mem (p);
4114 check_malloced_chunk(gm, mem, nb); 4192 check_malloced_chunk (gm, mem, nb);
4115 goto postaction; 4193 goto postaction;
4116 } 4194 }
4117 4195
4118 else if (nb > gm->dvsize) { 4196 else if (nb > gm->dvsize) {
4119 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ 4197 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4120 mchunkptr b, p, r; 4198 mchunkptr b, p, r;
4121 size_t rsize; 4199 size_t rsize;
4122 bindex_t i; 4200 bindex_t i;
4123 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); 4201 binmap_t leftbits =
4124 binmap_t leastbit = least_bit(leftbits); 4202 (smallbits << idx) & left_bits (idx2bit (idx));
4125 compute_bit2idx(leastbit, i); 4203 binmap_t leastbit = least_bit (leftbits);
4126 b = smallbin_at(gm, i); 4204 compute_bit2idx (leastbit, i);
4127 p = b->fd; 4205 b = smallbin_at (gm, i);
4128 assert(chunksize(p) == small_index2size(i)); 4206 p = b->fd;
4129 unlink_first_small_chunk(gm, b, p, i); 4207 assert (chunksize (p) == small_index2size (i));
4130 rsize = small_index2size(i) - nb; 4208 unlink_first_small_chunk (gm, b, p, i);
4131 /* Fit here cannot be remainderless if 4byte sizes */ 4209 rsize = small_index2size (i) - nb;
4132 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) 4210 /* Fit here cannot be remainderless if 4byte sizes */
4133 set_inuse_and_pinuse(gm, p, small_index2size(i)); 4211 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4134 else { 4212 set_inuse_and_pinuse (gm, p, small_index2size (i));
4135 set_size_and_pinuse_of_inuse_chunk(gm, p, nb); 4213 else {
4136 r = chunk_plus_offset(p, nb); 4214 set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
4137 set_size_and_pinuse_of_free_chunk(r, rsize); 4215 r = chunk_plus_offset (p, nb);
4138 replace_dv(gm, r, rsize); 4216 set_size_and_pinuse_of_free_chunk (r, rsize);
4139 } 4217 replace_dv (gm, r, rsize);
4140 mem = chunk2mem(p); 4218 }
4141 check_malloced_chunk(gm, mem, nb); 4219 mem = chunk2mem (p);
4142 goto postaction; 4220 check_malloced_chunk (gm, mem, nb);
4221 goto postaction;
4222 }
4223
4224 else if (gm->treemap != 0
4225 && (mem = tmalloc_small (gm, nb)) != 0) {
4226 check_malloced_chunk (gm, mem, nb);
4227 goto postaction;
4228 }
4229 }
4230 } else if (bytes >= MAX_REQUEST)
4231 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4232 else {
4233 nb = pad_request (bytes);
4234 if (gm->treemap != 0 && (mem = tmalloc_large (gm, nb)) != 0) {
4235 check_malloced_chunk (gm, mem, nb);
4236 goto postaction;
4237 }
4143 } 4238 }
4144 4239
4145 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { 4240 if (nb <= gm->dvsize) {
4146 check_malloced_chunk(gm, mem, nb); 4241 size_t rsize = gm->dvsize - nb;
4147 goto postaction; 4242 mchunkptr p = gm->dv;
4243 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4244 mchunkptr r = gm->dv = chunk_plus_offset (p, nb);
4245 gm->dvsize = rsize;
4246 set_size_and_pinuse_of_free_chunk (r, rsize);
4247 set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
4248 } else { /* exhaust dv */
4249 size_t dvs = gm->dvsize;
4250 gm->dvsize = 0;
4251 gm->dv = 0;
4252 set_inuse_and_pinuse (gm, p, dvs);
4253 }
4254 mem = chunk2mem (p);
4255 check_malloced_chunk (gm, mem, nb);
4256 goto postaction;
4148 } 4257 }
4149 } 4258
4150 } 4259 else if (nb < gm->topsize) { /* Split top */
4151 else if (bytes >= MAX_REQUEST) 4260 size_t rsize = gm->topsize -= nb;
4152 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ 4261 mchunkptr p = gm->top;
4153 else { 4262 mchunkptr r = gm->top = chunk_plus_offset (p, nb);
4154 nb = pad_request(bytes); 4263 r->head = rsize | PINUSE_BIT;
4155 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { 4264 set_size_and_pinuse_of_inuse_chunk (gm, p, nb);
4156 check_malloced_chunk(gm, mem, nb); 4265 mem = chunk2mem (p);
4157 goto postaction; 4266 check_top_chunk (gm, gm->top);
4158 } 4267 check_malloced_chunk (gm, mem, nb);
4159 } 4268 goto postaction;
4160 4269 }
4161 if (nb <= gm->dvsize) { 4270
4162 size_t rsize = gm->dvsize - nb; 4271 mem = sys_alloc (gm, nb);
4163 mchunkptr p = gm->dv; 4272
4164 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ 4273 postaction:
4165 mchunkptr r = gm->dv = chunk_plus_offset(p, nb); 4274 POSTACTION (gm);
4166 gm->dvsize = rsize; 4275 return mem;
4167 set_size_and_pinuse_of_free_chunk(r, rsize); 4276 }
4168 set_size_and_pinuse_of_inuse_chunk(gm, p, nb); 4277
4169 } 4278 return 0;
4170 else { /* exhaust dv */ 4279 }
4171 size_t dvs = gm->dvsize; 4280
4172 gm->dvsize = 0; 4281 void
4173 gm->dv = 0; 4282 dlfree (void *mem)
4174 set_inuse_and_pinuse(gm, p, dvs); 4283 {
4175 } 4284 /*
4176 mem = chunk2mem(p); 4285 Consolidate freed chunks with preceding or succeeding bordering
4177 check_malloced_chunk(gm, mem, nb); 4286 free chunks, if they exist, and then place in a bin. Intermixed
4178 goto postaction; 4287 with special cases for top, dv, mmapped chunks, and usage errors.
4179 } 4288 */
4180 4289
4181 else if (nb < gm->topsize) { /* Split top */ 4290 if (mem != 0) {
4182 size_t rsize = gm->topsize -= nb; 4291 mchunkptr p = mem2chunk (mem);
4183 mchunkptr p = gm->top;
4184 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4185 r->head = rsize | PINUSE_BIT;
4186 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4187 mem = chunk2mem(p);
4188 check_top_chunk(gm, gm->top);
4189 check_malloced_chunk(gm, mem, nb);
4190 goto postaction;
4191 }
4192
4193 mem = sys_alloc(gm, nb);
4194
4195 postaction:
4196 POSTACTION(gm);
4197 return mem;
4198 }
4199
4200 return 0;
4201 }
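[Editor's note, not part of the changeset] An illustration of the smallbin probe near the top of dlmalloc above: after the bin map is shifted down by idx, bit 0 means the exact bin is occupied and bit 1 means the next-larger bin is, and the expression ~smallbits & 1 advances the index only when the exact bin is empty but its neighbour is not.

#include <assert.h>

int main(void)
{
    unsigned int smallbits = 0x2;      /* only the next-larger bin is non-empty */
    assert((smallbits & 0x3U) != 0);   /* a remainderless fit exists */
    assert((~smallbits & 1) == 1);     /* so idx is advanced by one */
    return 0;
}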
4202
4203 void dlfree(void* mem) {
4204 /*
4205 Consolidate freed chunks with preceding or succeeding bordering
4206 free chunks, if they exist, and then place in a bin. Intermixed
4207 with special cases for top, dv, mmapped chunks, and usage errors.
4208 */
4209
4210 if (mem != 0) {
4211 mchunkptr p = mem2chunk(mem);
4212 #if FOOTERS 4292 #if FOOTERS
4213 mstate fm = get_mstate_for(p); 4293 mstate fm = get_mstate_for (p);
4214 if (!ok_magic(fm)) { 4294 if (!ok_magic (fm)) {
4215 USAGE_ERROR_ACTION(fm, p); 4295 USAGE_ERROR_ACTION (fm, p);
4216 return; 4296 return;
4217 } 4297 }
4218 #else /* FOOTERS */ 4298 #else /* FOOTERS */
4219 #define fm gm 4299 #define fm gm
4220 #endif /* FOOTERS */ 4300 #endif /* FOOTERS */
4221 if (!PREACTION(fm)) { 4301 if (!PREACTION (fm)) {
4222 check_inuse_chunk(fm, p); 4302 check_inuse_chunk (fm, p);
4223 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { 4303 if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) {
4224 size_t psize = chunksize(p); 4304 size_t psize = chunksize (p);
4225 mchunkptr next = chunk_plus_offset(p, psize); 4305 mchunkptr next = chunk_plus_offset (p, psize);
4226 if (!pinuse(p)) { 4306 if (!pinuse (p)) {
4227 size_t prevsize = p->prev_foot; 4307 size_t prevsize = p->prev_foot;
4228 if ((prevsize & IS_MMAPPED_BIT) != 0) { 4308 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4229 prevsize &= ~IS_MMAPPED_BIT; 4309 prevsize &= ~IS_MMAPPED_BIT;
4230 psize += prevsize + MMAP_FOOT_PAD; 4310 psize += prevsize + MMAP_FOOT_PAD;
4231 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) 4311 if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
4232 fm->footprint -= psize; 4312 fm->footprint -= psize;
4233 goto postaction; 4313 goto postaction;
4234 } 4314 } else {
4235 else { 4315 mchunkptr prev = chunk_minus_offset (p, prevsize);
4236 mchunkptr prev = chunk_minus_offset(p, prevsize); 4316 psize += prevsize;
4237 psize += prevsize; 4317 p = prev;
4238 p = prev; 4318 if (RTCHECK (ok_address (fm, prev))) { /* consolidate backward */
4239 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ 4319 if (p != fm->dv) {
4240 if (p != fm->dv) { 4320 unlink_chunk (fm, p, prevsize);
4241 unlink_chunk(fm, p, prevsize); 4321 } else if ((next->head & INUSE_BITS) ==
4242 } 4322 INUSE_BITS) {
4243 else if ((next->head & INUSE_BITS) == INUSE_BITS) { 4323 fm->dvsize = psize;
4244 fm->dvsize = psize; 4324 set_free_with_pinuse (p, psize, next);
4245 set_free_with_pinuse(p, psize, next); 4325 goto postaction;
4246 goto postaction; 4326 }
4247 } 4327 } else
4328 goto erroraction;
4329 }
4330 }
4331
4332 if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) {
4333 if (!cinuse (next)) { /* consolidate forward */
4334 if (next == fm->top) {
4335 size_t tsize = fm->topsize += psize;
4336 fm->top = p;
4337 p->head = tsize | PINUSE_BIT;
4338 if (p == fm->dv) {
4339 fm->dv = 0;
4340 fm->dvsize = 0;
4341 }
4342 if (should_trim (fm, tsize))
4343 sys_trim (fm, 0);
4344 goto postaction;
4345 } else if (next == fm->dv) {
4346 size_t dsize = fm->dvsize += psize;
4347 fm->dv = p;
4348 set_size_and_pinuse_of_free_chunk (p, dsize);
4349 goto postaction;
4350 } else {
4351 size_t nsize = chunksize (next);
4352 psize += nsize;
4353 unlink_chunk (fm, next, nsize);
4354 set_size_and_pinuse_of_free_chunk (p, psize);
4355 if (p == fm->dv) {
4356 fm->dvsize = psize;
4357 goto postaction;
4358 }
4359 }
4360 } else
4361 set_free_with_pinuse (p, psize, next);
4362 insert_chunk (fm, p, psize);
4363 check_free_chunk (fm, p);
4364 goto postaction;
4365 }
4248 } 4366 }
4249 else 4367 erroraction:
4250 goto erroraction; 4368 USAGE_ERROR_ACTION (fm, p);
4251 } 4369 postaction:
4370 POSTACTION (fm);
4252 } 4371 }
4253 4372 }
4254 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4255 if (!cinuse(next)) { /* consolidate forward */
4256 if (next == fm->top) {
4257 size_t tsize = fm->topsize += psize;
4258 fm->top = p;
4259 p->head = tsize | PINUSE_BIT;
4260 if (p == fm->dv) {
4261 fm->dv = 0;
4262 fm->dvsize = 0;
4263 }
4264 if (should_trim(fm, tsize))
4265 sys_trim(fm, 0);
4266 goto postaction;
4267 }
4268 else if (next == fm->dv) {
4269 size_t dsize = fm->dvsize += psize;
4270 fm->dv = p;
4271 set_size_and_pinuse_of_free_chunk(p, dsize);
4272 goto postaction;
4273 }
4274 else {
4275 size_t nsize = chunksize(next);
4276 psize += nsize;
4277 unlink_chunk(fm, next, nsize);
4278 set_size_and_pinuse_of_free_chunk(p, psize);
4279 if (p == fm->dv) {
4280 fm->dvsize = psize;
4281 goto postaction;
4282 }
4283 }
4284 }
4285 else
4286 set_free_with_pinuse(p, psize, next);
4287 insert_chunk(fm, p, psize);
4288 check_free_chunk(fm, p);
4289 goto postaction;
4290 }
4291 }
4292 erroraction:
4293 USAGE_ERROR_ACTION(fm, p);
4294 postaction:
4295 POSTACTION(fm);
4296 }
4297 }
4298 #if !FOOTERS 4373 #if !FOOTERS
4299 #undef fm 4374 #undef fm
4300 #endif /* FOOTERS */ 4375 #endif /* FOOTERS */
4301 } 4376 }
4302 4377
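[Editor's note, not part of the changeset] A small usage sketch against the dlfree just shown. The coalescing behaviour follows the comment at the top of the function, and the mem != 0 guard makes a null argument a harmless no-op.

#include <stddef.h>

void *dlmalloc(size_t bytes);   /* defined above */
void  dlfree(void *mem);

static void free_order_example(void)
{
    /* two requests are typically carved back to back out of top, so
       whichever block is freed second is consolidated with its
       already-free neighbour or folded back into top */
    void *a = dlmalloc(40);
    void *b = dlmalloc(40);
    dlfree(a);
    dlfree(b);
    dlfree(0);                  /* ignored: dlfree returns immediately on null */
}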
4303 void* dlcalloc(size_t n_elements, size_t elem_size) { 4378 void *
4304 void* mem; 4379 dlcalloc (size_t n_elements, size_t elem_size)
4305 size_t req = 0; 4380 {
4306 if (n_elements != 0) { 4381 void *mem;
4307 req = n_elements * elem_size; 4382 size_t req = 0;
4308 if (((n_elements | elem_size) & ~(size_t)0xffff) && 4383 if (n_elements != 0) {
4309 (req / n_elements != elem_size)) 4384 req = n_elements * elem_size;
4310 req = MAX_SIZE_T; /* force downstream failure on overflow */ 4385 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4311 } 4386 (req / n_elements != elem_size))
4312 mem = dlmalloc(req); 4387 req = MAX_SIZE_T; /* force downstream failure on overflow */
4313 if (mem != 0 && calloc_must_clear(mem2chunk(mem))) 4388 }
4314 memset(mem, 0, req); 4389 mem = dlmalloc (req);
4315 return mem; 4390 if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
4316 } 4391 memset (mem, 0, req);
4317 4392 return mem;
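[Editor's note, not part of the changeset] An illustration of the overflow guard in dlcalloc above: the cheap mask test skips the division entirely while both factors fit in 16 bits, and only forces the exact check when the product could actually wrap. The sketch picks factors around the half-width of size_t so that it overflows on 32-bit and 64-bit builds alike.

#include <stddef.h>
#include <assert.h>

int main(void)
{
    size_t half = (size_t)1 << (sizeof(size_t) * 4);   /* 2^16 or 2^32 */
    size_t n = half;
    size_t sz = half + 1;
    size_t req = n * sz;                 /* wraps around modulo SIZE_MAX + 1 */
    int overflowed = ((n | sz) & ~(size_t)0xffff) && (req / n != sz);
    assert(overflowed);                  /* dlcalloc would force MAX_SIZE_T here */
    return 0;
}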
4318 void* dlrealloc(void* oldmem, size_t bytes) { 4393 }
4319 if (oldmem == 0) 4394
4320 return dlmalloc(bytes); 4395 void *
4396 dlrealloc (void *oldmem, size_t bytes)
4397 {
4398 if (oldmem == 0)
4399 return dlmalloc (bytes);
4321 #ifdef REALLOC_ZERO_BYTES_FREES 4400 #ifdef REALLOC_ZERO_BYTES_FREES
4322 if (bytes == 0) { 4401 if (bytes == 0) {
4323 dlfree(oldmem); 4402 dlfree (oldmem);
4403 return 0;
4404 }
4405 #endif /* REALLOC_ZERO_BYTES_FREES */
4406 else {
4407 #if ! FOOTERS
4408 mstate m = gm;
4409 #else /* FOOTERS */
4410 mstate m = get_mstate_for (mem2chunk (oldmem));
4411 if (!ok_magic (m)) {
4412 USAGE_ERROR_ACTION (m, oldmem);
4413 return 0;
4414 }
4415 #endif /* FOOTERS */
4416 return internal_realloc (m, oldmem, bytes);
4417 }
4418 }
4419
4420 void *
4421 dlmemalign (size_t alignment, size_t bytes)
4422 {
4423 return internal_memalign (gm, alignment, bytes);
4424 }
4425
4426 void **
4427 dlindependent_calloc (size_t n_elements, size_t elem_size, void *chunks[])
4428 {
4429 size_t sz = elem_size; /* serves as 1-element array */
4430 return ialloc (gm, n_elements, &sz, 3, chunks);
4431 }
4432
4433 void **
4434 dlindependent_comalloc (size_t n_elements, size_t sizes[], void *chunks[])
4435 {
4436 return ialloc (gm, n_elements, sizes, 0, chunks);
4437 }
4438
4439 void *
4440 dlvalloc (size_t bytes)
4441 {
4442 size_t pagesz;
4443 init_mparams ();
4444 pagesz = mparams.page_size;
4445 return dlmemalign (pagesz, bytes);
4446 }
4447
4448 void *
4449 dlpvalloc (size_t bytes)
4450 {
4451 size_t pagesz;
4452 init_mparams ();
4453 pagesz = mparams.page_size;
4454 return dlmemalign (pagesz,
4455 (bytes + pagesz - SIZE_T_ONE) & ~(pagesz -
4456 SIZE_T_ONE));
4457 }
4458
4459 int
4460 dlmalloc_trim (size_t pad)
4461 {
4462 int result = 0;
4463 if (!PREACTION (gm)) {
4464 result = sys_trim (gm, pad);
4465 POSTACTION (gm);
4466 }
4467 return result;
4468 }
4469
4470 size_t
4471 dlmalloc_footprint (void)
4472 {
4473 return gm->footprint;
4474 }
4475
4476 size_t
4477 dlmalloc_max_footprint (void)
4478 {
4479 return gm->max_footprint;
4480 }
4481
4482 #if !NO_MALLINFO
4483 struct mallinfo
4484 dlmallinfo (void)
4485 {
4486 return internal_mallinfo (gm);
4487 }
4488 #endif /* NO_MALLINFO */
4489
4490 void
4491 dlmalloc_stats ()
4492 {
4493 internal_malloc_stats (gm);
4494 }
4495
4496 size_t
4497 dlmalloc_usable_size (void *mem)
4498 {
4499 if (mem != 0) {
4500 mchunkptr p = mem2chunk (mem);
4501 if (cinuse (p))
4502 return chunksize (p) - overhead_for (p);
4503 }
4324 return 0; 4504 return 0;
4325 } 4505 }
4326 #endif /* REALLOC_ZERO_BYTES_FREES */ 4506
4327 else { 4507 int
4328 #if ! FOOTERS 4508 dlmallopt (int param_number, int value)
4329 mstate m = gm; 4509 {
4330 #else /* FOOTERS */ 4510 return change_mparam (param_number, value);
4331 mstate m = get_mstate_for(mem2chunk(oldmem));
4332 if (!ok_magic(m)) {
4333 USAGE_ERROR_ACTION(m, oldmem);
4334 return 0;
4335 }
4336 #endif /* FOOTERS */
4337 return internal_realloc(m, oldmem, bytes);
4338 }
4339 }
4340
4341 void* dlmemalign(size_t alignment, size_t bytes) {
4342 return internal_memalign(gm, alignment, bytes);
4343 }
4344
4345 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
4346 void* chunks[]) {
4347 size_t sz = elem_size; /* serves as 1-element array */
4348 return ialloc(gm, n_elements, &sz, 3, chunks);
4349 }
4350
4351 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
4352 void* chunks[]) {
4353 return ialloc(gm, n_elements, sizes, 0, chunks);
4354 }
4355
4356 void* dlvalloc(size_t bytes) {
4357 size_t pagesz;
4358 init_mparams();
4359 pagesz = mparams.page_size;
4360 return dlmemalign(pagesz, bytes);
4361 }
4362
4363 void* dlpvalloc(size_t bytes) {
4364 size_t pagesz;
4365 init_mparams();
4366 pagesz = mparams.page_size;
4367 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4368 }
4369
4370 int dlmalloc_trim(size_t pad) {
4371 int result = 0;
4372 if (!PREACTION(gm)) {
4373 result = sys_trim(gm, pad);
4374 POSTACTION(gm);
4375 }
4376 return result;
4377 }
4378
4379 size_t dlmalloc_footprint(void) {
4380 return gm->footprint;
4381 }
4382
4383 size_t dlmalloc_max_footprint(void) {
4384 return gm->max_footprint;
4385 }
4386
4387 #if !NO_MALLINFO
4388 struct mallinfo dlmallinfo(void) {
4389 return internal_mallinfo(gm);
4390 }
4391 #endif /* NO_MALLINFO */
4392
4393 void dlmalloc_stats() {
4394 internal_malloc_stats(gm);
4395 }
4396
4397 size_t dlmalloc_usable_size(void* mem) {
4398 if (mem != 0) {
4399 mchunkptr p = mem2chunk(mem);
4400 if (cinuse(p))
4401 return chunksize(p) - overhead_for(p);
4402 }
4403 return 0;
4404 }
4405
4406 int dlmallopt(int param_number, int value) {
4407 return change_mparam(param_number, value);
4408 } 4511 }
4409 4512
4410 #endif /* !ONLY_MSPACES */ 4513 #endif /* !ONLY_MSPACES */
4411 4514
4412 /* ----------------------------- user mspaces ---------------------------- */ 4515 /* ----------------------------- user mspaces ---------------------------- */
4413 4516
4414 #if MSPACES 4517 #if MSPACES
4415 4518
4416 static mstate init_user_mstate(char* tbase, size_t tsize) { 4519 static mstate
4417 size_t msize = pad_request(sizeof(struct malloc_state)); 4520 init_user_mstate (char *tbase, size_t tsize)
4418 mchunkptr mn; 4521 {
4419 mchunkptr msp = align_as_chunk(tbase); 4522 size_t msize = pad_request (sizeof (struct malloc_state));
4420 mstate m = (mstate)(chunk2mem(msp)); 4523 mchunkptr mn;
4421 memset(m, 0, msize); 4524 mchunkptr msp = align_as_chunk (tbase);
4422 INITIAL_LOCK(&m->mutex); 4525 mstate m = (mstate) (chunk2mem (msp));
4423 msp->head = (msize|PINUSE_BIT|CINUSE_BIT); 4526 memset (m, 0, msize);
4424 m->seg.base = m->least_addr = tbase; 4527 INITIAL_LOCK (&m->mutex);
4425 m->seg.size = m->footprint = m->max_footprint = tsize; 4528 msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4426 m->magic = mparams.magic; 4529 m->seg.base = m->least_addr = tbase;
4427 m->mflags = mparams.default_mflags; 4530 m->seg.size = m->footprint = m->max_footprint = tsize;
4428 disable_contiguous(m); 4531 m->magic = mparams.magic;
4429 init_bins(m); 4532 m->mflags = mparams.default_mflags;
4430 mn = next_chunk(mem2chunk(m)); 4533 disable_contiguous (m);
4431 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); 4534 init_bins (m);
4432 check_top_chunk(m, m->top); 4535 mn = next_chunk (mem2chunk (m));
4433 return m; 4536 init_top (m, mn,
4434 } 4537 (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4435 4538 check_top_chunk (m, m->top);
4436 mspace create_mspace(size_t capacity, int locked) { 4539 return m;
4437 mstate m = 0; 4540 }
4438 size_t msize = pad_request(sizeof(struct malloc_state)); 4541
4439 init_mparams(); /* Ensure pagesize etc initialized */ 4542 mspace
4440 4543 create_mspace (size_t capacity, int locked)
4441 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { 4544 {
4442 size_t rs = ((capacity == 0)? mparams.granularity : 4545 mstate m = 0;
4443 (capacity + TOP_FOOT_SIZE + msize)); 4546 size_t msize = pad_request (sizeof (struct malloc_state));
4444 size_t tsize = granularity_align(rs); 4547 init_mparams (); /* Ensure pagesize etc initialized */
4445 char* tbase = (char*)(CALL_MMAP(tsize)); 4548
4446 if (tbase != CMFAIL) { 4549 if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4447 m = init_user_mstate(tbase, tsize); 4550 size_t rs = ((capacity == 0) ? mparams.granularity :
4448 m->seg.sflags = IS_MMAPPED_BIT; 4551 (capacity + TOP_FOOT_SIZE + msize));
4449 set_lock(m, locked); 4552 size_t tsize = granularity_align (rs);
4450 } 4553 char *tbase = (char *) (CALL_MMAP (tsize));
4451 } 4554 if (tbase != CMFAIL) {
4452 return (mspace)m; 4555 m = init_user_mstate (tbase, tsize);
4453 } 4556 m->seg.sflags = IS_MMAPPED_BIT;
4454 4557 set_lock (m, locked);
4455 mspace create_mspace_with_base(void* base, size_t capacity, int locked) { 4558 }
4456 mstate m = 0; 4559 }
4457 size_t msize = pad_request(sizeof(struct malloc_state)); 4560 return (mspace) m;
4458 init_mparams(); /* Ensure pagesize etc initialized */ 4561 }
4459 4562
4460 if (capacity > msize + TOP_FOOT_SIZE && 4563 mspace
4461 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { 4564 create_mspace_with_base (void *base, size_t capacity, int locked)
4462 m = init_user_mstate((char*)base, capacity); 4565 {
4463 m->seg.sflags = EXTERN_BIT; 4566 mstate m = 0;
4464 set_lock(m, locked); 4567 size_t msize = pad_request (sizeof (struct malloc_state));
4465 } 4568 init_mparams (); /* Ensure pagesize etc initialized */
4466 return (mspace)m; 4569
4467 } 4570 if (capacity > msize + TOP_FOOT_SIZE &&
4468 4571 capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4469 size_t destroy_mspace(mspace msp) { 4572 m = init_user_mstate ((char *) base, capacity);
4470 size_t freed = 0; 4573 m->seg.sflags = EXTERN_BIT;
4471 mstate ms = (mstate)msp; 4574 set_lock (m, locked);
4472 if (ok_magic(ms)) { 4575 }
4473 msegmentptr sp = &ms->seg; 4576 return (mspace) m;
4474 while (sp != 0) { 4577 }
4475 char* base = sp->base; 4578
4476 size_t size = sp->size; 4579 size_t
4477 flag_t flag = sp->sflags; 4580 destroy_mspace (mspace msp)
4478 sp = sp->next; 4581 {
4479 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && 4582 size_t freed = 0;
4480 CALL_MUNMAP(base, size) == 0) 4583 mstate ms = (mstate) msp;
4481 freed += size; 4584 if (ok_magic (ms)) {
4482 } 4585 msegmentptr sp = &ms->seg;
4483 } 4586 while (sp != 0) {
4484 else { 4587 char *base = sp->base;
4485 USAGE_ERROR_ACTION(ms,ms); 4588 size_t size = sp->size;
4486 } 4589 flag_t flag = sp->sflags;
4487 return freed; 4590 sp = sp->next;
4591 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4592 CALL_MUNMAP (base, size) == 0)
4593 freed += size;
4594 }
4595 } else {
4596 USAGE_ERROR_ACTION (ms, ms);
4597 }
4598 return freed;
4488 } 4599 }
4489 4600
4490 /* 4601 /*
4491 mspace versions of routines are near-clones of the global 4602 mspace versions of routines are near-clones of the global
4492 versions. This is not so nice but better than the alternatives. 4603 versions. This is not so nice but better than the alternatives.
4493 */ 4604 */
4494 4605
4495 4606
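[Editor's note, not part of the changeset] A usage sketch of the mspace entry points in this block. mspace is declared earlier in the file as an opaque handle; the void * typedef below mirrors that declaration so the example stands alone.

#include <stddef.h>

typedef void *mspace;                    /* mirrors the typedef earlier in this file */
mspace  create_mspace(size_t capacity, int locked);
void   *mspace_malloc(mspace msp, size_t bytes);
size_t  destroy_mspace(mspace msp);

static void arena_example(void)
{
    /* capacity 0 starts the space at the default granularity and lets it
       grow on demand; a nonzero locked flag gives the space its own lock */
    mspace arena = create_mspace(0, 0);
    if (arena != 0) {
        void *p = mspace_malloc(arena, 128);
        (void) p;                        /* no individual free needed here */
        destroy_mspace(arena);           /* unmaps every segment of the space */
    }
}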
4496 void* mspace_malloc(mspace msp, size_t bytes) { 4607 void *
4497 mstate ms = (mstate)msp; 4608 mspace_malloc (mspace msp, size_t bytes)
4498 if (!ok_magic(ms)) { 4609 {
4499 USAGE_ERROR_ACTION(ms,ms); 4610 mstate ms = (mstate) msp;
4611 if (!ok_magic (ms)) {
4612 USAGE_ERROR_ACTION (ms, ms);
4613 return 0;
4614 }
4615 if (!PREACTION (ms)) {
4616 void *mem;
4617 size_t nb;
4618 if (bytes <= MAX_SMALL_REQUEST) {
4619 bindex_t idx;
4620 binmap_t smallbits;
4621 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request (bytes);
4622 idx = small_index (nb);
4623 smallbits = ms->smallmap >> idx;
4624
4625 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4626 mchunkptr b, p;
4627 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4628 b = smallbin_at (ms, idx);
4629 p = b->fd;
4630 assert (chunksize (p) == small_index2size (idx));
4631 unlink_first_small_chunk (ms, b, p, idx);
4632 set_inuse_and_pinuse (ms, p, small_index2size (idx));
4633 mem = chunk2mem (p);
4634 check_malloced_chunk (ms, mem, nb);
4635 goto postaction;
4636 }
4637
4638 else if (nb > ms->dvsize) {
4639 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4640 mchunkptr b, p, r;
4641 size_t rsize;
4642 bindex_t i;
4643 binmap_t leftbits =
4644 (smallbits << idx) & left_bits (idx2bit (idx));
4645 binmap_t leastbit = least_bit (leftbits);
4646 compute_bit2idx (leastbit, i);
4647 b = smallbin_at (ms, i);
4648 p = b->fd;
4649 assert (chunksize (p) == small_index2size (i));
4650 unlink_first_small_chunk (ms, b, p, i);
4651 rsize = small_index2size (i) - nb;
4652 /* Fit here cannot be remainderless with 4-byte sizes */
4653 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4654 set_inuse_and_pinuse (ms, p, small_index2size (i));
4655 else {
4656 set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
4657 r = chunk_plus_offset (p, nb);
4658 set_size_and_pinuse_of_free_chunk (r, rsize);
4659 replace_dv (ms, r, rsize);
4660 }
4661 mem = chunk2mem (p);
4662 check_malloced_chunk (ms, mem, nb);
4663 goto postaction;
4664 }
4665
4666 else if (ms->treemap != 0
4667 && (mem = tmalloc_small (ms, nb)) != 0) {
4668 check_malloced_chunk (ms, mem, nb);
4669 goto postaction;
4670 }
4671 }
4672 } else if (bytes >= MAX_REQUEST)
4673 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4674 else {
4675 nb = pad_request (bytes);
4676 if (ms->treemap != 0 && (mem = tmalloc_large (ms, nb)) != 0) {
4677 check_malloced_chunk (ms, mem, nb);
4678 goto postaction;
4679 }
4680 }
4681
4682 if (nb <= ms->dvsize) {
4683 size_t rsize = ms->dvsize - nb;
4684 mchunkptr p = ms->dv;
4685 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4686 mchunkptr r = ms->dv = chunk_plus_offset (p, nb);
4687 ms->dvsize = rsize;
4688 set_size_and_pinuse_of_free_chunk (r, rsize);
4689 set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
4690 } else { /* exhaust dv */
4691 size_t dvs = ms->dvsize;
4692 ms->dvsize = 0;
4693 ms->dv = 0;
4694 set_inuse_and_pinuse (ms, p, dvs);
4695 }
4696 mem = chunk2mem (p);
4697 check_malloced_chunk (ms, mem, nb);
4698 goto postaction;
4699 }
4700
4701 else if (nb < ms->topsize) { /* Split top */
4702 size_t rsize = ms->topsize -= nb;
4703 mchunkptr p = ms->top;
4704 mchunkptr r = ms->top = chunk_plus_offset (p, nb);
4705 r->head = rsize | PINUSE_BIT;
4706 set_size_and_pinuse_of_inuse_chunk (ms, p, nb);
4707 mem = chunk2mem (p);
4708 check_top_chunk (ms, ms->top);
4709 check_malloced_chunk (ms, mem, nb);
4710 goto postaction;
4711 }
4712
4713 mem = sys_alloc (ms, nb);
4714
4715 postaction:
4716 POSTACTION (ms);
4717 return mem;
4718 }
4719
4500 return 0; 4720 return 0;
4501 } 4721 }
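[Editor's aside] The reindented body of mspace_malloc ends here in the right-hand column; the pre-indent original continues below in the left-hand column. Both implement the usual dlmalloc cascade: a remainderless small-bin fit, then the next nonempty small bin or the tree bins, then the designated victim (dv) chunk, then the top chunk, and finally sys_alloc to grow the space. A compressed paraphrase of that decision order, with stand-in parameters instead of a real mstate (the 244 threshold below is illustrative only, not the real MAX_SMALL_REQUEST, and the MAX_REQUEST forced-failure guard is omitted):

    #include <stdio.h>
    #include <stddef.h>

    enum alloc_path { SMALL_BIN, TREE_BIN, DV_CHUNK, TOP_CHUNK, SYS_ALLOC };

    /* Mirrors the order of checks in mspace_malloc above. */
    static enum alloc_path
    choose_path(size_t nb, size_t max_small,
                int exact_small_fit,    /* bin idx or idx+1 nonempty       */
                int later_small_fit,    /* some higher small bin nonempty  */
                int tree_fit,           /* a tree-bin search would succeed */
                size_t dvsize, size_t topsize)
    {
        if (nb <= max_small) {
            if (exact_small_fit)
                return SMALL_BIN;          /* remainderless small-bin fit   */
            if (nb > dvsize) {
                if (later_small_fit)
                    return SMALL_BIN;      /* next nonempty small bin       */
                if (tree_fit)
                    return TREE_BIN;       /* tmalloc_small                 */
            }
        } else if (tree_fit) {
            return TREE_BIN;               /* tmalloc_large                 */
        }
        if (nb <= dvsize)
            return DV_CHUNK;               /* split or exhaust the dv       */
        if (nb < topsize)
            return TOP_CHUNK;              /* split the top chunk           */
        return SYS_ALLOC;                  /* grow the space via sys_alloc  */
    }

    int main(void)
    {
        /* 64-byte request with an exact small-bin fit -> small-bin path. */
        printf("small request -> path %d\n",
               (int) choose_path(64, 244, 1, 0, 0, 0, 4096));
        /* 512-byte request, no bin fit, dv large enough -> dv split. */
        printf("mid request   -> path %d\n",
               (int) choose_path(512, 244, 0, 0, 0, 1024, 4096));
        return 0;
    }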
4502 if (!PREACTION(ms)) { 4722
4503 void* mem; 4723 void
4504 size_t nb; 4724 mspace_free (mspace msp, void *mem)
4505 if (bytes <= MAX_SMALL_REQUEST) { 4725 {
4506 bindex_t idx; 4726 if (mem != 0) {
4507 binmap_t smallbits; 4727 mchunkptr p = mem2chunk (mem);
4508 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); 4728 #if FOOTERS
4509 idx = small_index(nb); 4729 mstate fm = get_mstate_for (p);
4510 smallbits = ms->smallmap >> idx; 4730 #else /* FOOTERS */
4511 4731 mstate fm = (mstate) msp;
4512 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ 4732 #endif /* FOOTERS */
4513 mchunkptr b, p; 4733 if (!ok_magic (fm)) {
4514 idx += ~smallbits & 1; /* Uses next bin if idx empty */ 4734 USAGE_ERROR_ACTION (fm, p);
4515 b = smallbin_at(ms, idx); 4735 return;
4516 p = b->fd;
4517 assert(chunksize(p) == small_index2size(idx));
4518 unlink_first_small_chunk(ms, b, p, idx);
4519 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4520 mem = chunk2mem(p);
4521 check_malloced_chunk(ms, mem, nb);
4522 goto postaction;
4523 }
4524
4525 else if (nb > ms->dvsize) {
4526 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4527 mchunkptr b, p, r;
4528 size_t rsize;
4529 bindex_t i;
4530 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4531 binmap_t leastbit = least_bit(leftbits);
4532 compute_bit2idx(leastbit, i);
4533 b = smallbin_at(ms, i);
4534 p = b->fd;
4535 assert(chunksize(p) == small_index2size(i));
4536 unlink_first_small_chunk(ms, b, p, i);
4537 rsize = small_index2size(i) - nb;
4538 /* Fit here cannot be remainderless with 4-byte sizes */
4539 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4540 set_inuse_and_pinuse(ms, p, small_index2size(i));
4541 else {
4542 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4543 r = chunk_plus_offset(p, nb);
4544 set_size_and_pinuse_of_free_chunk(r, rsize);
4545 replace_dv(ms, r, rsize);
4546 }
4547 mem = chunk2mem(p);
4548 check_malloced_chunk(ms, mem, nb);
4549 goto postaction;
4550 } 4736 }
4551 4737 if (!PREACTION (fm)) {
4552 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { 4738 check_inuse_chunk (fm, p);
4553 check_malloced_chunk(ms, mem, nb); 4739 if (RTCHECK (ok_address (fm, p) && ok_cinuse (p))) {
4554 goto postaction; 4740 size_t psize = chunksize (p);
4741 mchunkptr next = chunk_plus_offset (p, psize);
4742 if (!pinuse (p)) {
4743 size_t prevsize = p->prev_foot;
4744 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4745 prevsize &= ~IS_MMAPPED_BIT;
4746 psize += prevsize + MMAP_FOOT_PAD;
4747 if (CALL_MUNMAP ((char *) p - prevsize, psize) == 0)
4748 fm->footprint -= psize;
4749 goto postaction;
4750 } else {
4751 mchunkptr prev = chunk_minus_offset (p, prevsize);
4752 psize += prevsize;
4753 p = prev;
4754 if (RTCHECK (ok_address (fm, prev))) { /* consolidate backward */
4755 if (p != fm->dv) {
4756 unlink_chunk (fm, p, prevsize);
4757 } else if ((next->head & INUSE_BITS) ==
4758 INUSE_BITS) {
4759 fm->dvsize = psize;
4760 set_free_with_pinuse (p, psize, next);
4761 goto postaction;
4762 }
4763 } else
4764 goto erroraction;
4765 }
4766 }
4767
4768 if (RTCHECK (ok_next (p, next) && ok_pinuse (next))) {
4769 if (!cinuse (next)) { /* consolidate forward */
4770 if (next == fm->top) {
4771 size_t tsize = fm->topsize += psize;
4772 fm->top = p;
4773 p->head = tsize | PINUSE_BIT;
4774 if (p == fm->dv) {
4775 fm->dv = 0;
4776 fm->dvsize = 0;
4777 }
4778 if (should_trim (fm, tsize))
4779 sys_trim (fm, 0);
4780 goto postaction;
4781 } else if (next == fm->dv) {
4782 size_t dsize = fm->dvsize += psize;
4783 fm->dv = p;
4784 set_size_and_pinuse_of_free_chunk (p, dsize);
4785 goto postaction;
4786 } else {
4787 size_t nsize = chunksize (next);
4788 psize += nsize;
4789 unlink_chunk (fm, next, nsize);
4790 set_size_and_pinuse_of_free_chunk (p, psize);
4791 if (p == fm->dv) {
4792 fm->dvsize = psize;
4793 goto postaction;
4794 }
4795 }
4796 } else
4797 set_free_with_pinuse (p, psize, next);
4798 insert_chunk (fm, p, psize);
4799 check_free_chunk (fm, p);
4800 goto postaction;
4801 }
4802 }
4803 erroraction:
4804 USAGE_ERROR_ACTION (fm, p);
4805 postaction:
4806 POSTACTION (fm);
4555 } 4807 }
4556 } 4808 }
4557 } 4809 }
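[Editor's aside] The reindented mspace_free closes here in the right-hand column. Note the FOOTERS split at its top: with FOOTERS the owning mstate is recovered from the chunk itself (get_mstate_for) and the msp argument is effectively ignored; without FOOTERS the handle is trusted as-is, so freeing a chunk into the wrong space corrupts that space silently. A small sketch of the correct pairing (prototypes assumed from the definitions above):

    #include <stddef.h>

    typedef void *mspace;
    extern mspace create_mspace(size_t capacity, int locked);
    extern void *mspace_malloc(mspace msp, size_t bytes);
    extern void mspace_free(mspace msp, void *mem);
    extern size_t destroy_mspace(mspace msp);

    int main(void)
    {
        mspace a = create_mspace(0, 0);
        mspace b = create_mspace(0, 0);
        void *p = mspace_malloc(a, 128);

        /* Correct: the chunk goes back to the space it came from.  Without
           FOOTERS, mspace_free(b, p) would operate on the wrong mstate; with
           FOOTERS the owning state is read from the chunk footer instead. */
        mspace_free(a, p);

        destroy_mspace(a);
        destroy_mspace(b);
        return 0;
    }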
4558 else if (bytes >= MAX_REQUEST) 4810
4559 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ 4811 void *
4812 mspace_calloc (mspace msp, size_t n_elements, size_t elem_size)
4813 {
4814 void *mem;
4815 size_t req = 0;
4816 mstate ms = (mstate) msp;
4817 if (!ok_magic (ms)) {
4818 USAGE_ERROR_ACTION (ms, ms);
4819 return 0;
4820 }
4821 if (n_elements != 0) {
4822 req = n_elements * elem_size;
4823 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4824 (req / n_elements != elem_size))
4825 req = MAX_SIZE_T; /* force downstream failure on overflow */
4826 }
4827 mem = internal_malloc (ms, req);
4828 if (mem != 0 && calloc_must_clear (mem2chunk (mem)))
4829 memset (mem, 0, req);
4830 return mem;
4831 }
4832
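[Editor's aside] The overflow guard in mspace_calloc above is the standard calloc idiom: the multiplication is cross-checked with a division only when either operand has bits above 0xffff (so small, common requests skip the divide), and a wrapped product forces req to MAX_SIZE_T so the downstream malloc fails. A standalone illustration of just that check (the helper name is invented for the sketch):

    #include <stdio.h>
    #include <stddef.h>

    /* Mirrors the guard in mspace_calloc; returns 1 on success, 0 if the
       product n_elements * elem_size wrapped around. */
    static int checked_mul(size_t n_elements, size_t elem_size, size_t *req)
    {
        size_t r = n_elements * elem_size;          /* may wrap */
        if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
            n_elements != 0 && r / n_elements != elem_size)
            return 0;
        *req = r;
        return 1;
    }

    int main(void)
    {
        size_t req;
        printf("100 x 32  : %s\n",
               checked_mul(100, 32, &req) ? "ok" : "overflow");
        /* Two operands of 2^(bits/2) wrap the product to 0 and are caught. */
        printf("big x big : %s\n",
               checked_mul((size_t) 1 << (sizeof(size_t) * 4),
                           (size_t) 1 << (sizeof(size_t) * 4), &req)
                   ? "ok" : "overflow");
        return 0;
    }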
4833 void *
4834 mspace_realloc (mspace msp, void *oldmem, size_t bytes)
4835 {
4836 if (oldmem == 0)
4837 return mspace_malloc (msp, bytes);
4838 #ifdef REALLOC_ZERO_BYTES_FREES
4839 if (bytes == 0) {
4840 mspace_free (msp, oldmem);
4841 return 0;
4842 }
4843 #endif /* REALLOC_ZERO_BYTES_FREES */
4560 else { 4844 else {
4561 nb = pad_request(bytes);
4562 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4563 check_malloced_chunk(ms, mem, nb);
4564 goto postaction;
4565 }
4566 }
4567
4568 if (nb <= ms->dvsize) {
4569 size_t rsize = ms->dvsize - nb;
4570 mchunkptr p = ms->dv;
4571 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4572 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4573 ms->dvsize = rsize;
4574 set_size_and_pinuse_of_free_chunk(r, rsize);
4575 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4576 }
4577 else { /* exhaust dv */
4578 size_t dvs = ms->dvsize;
4579 ms->dvsize = 0;
4580 ms->dv = 0;
4581 set_inuse_and_pinuse(ms, p, dvs);
4582 }
4583 mem = chunk2mem(p);
4584 check_malloced_chunk(ms, mem, nb);
4585 goto postaction;
4586 }
4587
4588 else if (nb < ms->topsize) { /* Split top */
4589 size_t rsize = ms->topsize -= nb;
4590 mchunkptr p = ms->top;
4591 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4592 r->head = rsize | PINUSE_BIT;
4593 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4594 mem = chunk2mem(p);
4595 check_top_chunk(ms, ms->top);
4596 check_malloced_chunk(ms, mem, nb);
4597 goto postaction;
4598 }
4599
4600 mem = sys_alloc(ms, nb);
4601
4602 postaction:
4603 POSTACTION(ms);
4604 return mem;
4605 }
4606
4607 return 0;
4608 }
4609
4610 void mspace_free(mspace msp, void* mem) {
4611 if (mem != 0) {
4612 mchunkptr p = mem2chunk(mem);
4613 #if FOOTERS 4845 #if FOOTERS
4614 mstate fm = get_mstate_for(p); 4846 mchunkptr p = mem2chunk (oldmem);
4847 mstate ms = get_mstate_for (p);
4615 #else /* FOOTERS */ 4848 #else /* FOOTERS */
4616 mstate fm = (mstate)msp; 4849 mstate ms = (mstate) msp;
4617 #endif /* FOOTERS */ 4850 #endif /* FOOTERS */
4618 if (!ok_magic(fm)) { 4851 if (!ok_magic (ms)) {
4619 USAGE_ERROR_ACTION(fm, p); 4852 USAGE_ERROR_ACTION (ms, ms);
4620 return; 4853 return 0;
4621 }
4622 if (!PREACTION(fm)) {
4623 check_inuse_chunk(fm, p);
4624 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4625 size_t psize = chunksize(p);
4626 mchunkptr next = chunk_plus_offset(p, psize);
4627 if (!pinuse(p)) {
4628 size_t prevsize = p->prev_foot;
4629 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4630 prevsize &= ~IS_MMAPPED_BIT;
4631 psize += prevsize + MMAP_FOOT_PAD;
4632 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4633 fm->footprint -= psize;
4634 goto postaction;
4635 }
4636 else {
4637 mchunkptr prev = chunk_minus_offset(p, prevsize);
4638 psize += prevsize;
4639 p = prev;
4640 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4641 if (p != fm->dv) {
4642 unlink_chunk(fm, p, prevsize);
4643 }
4644 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4645 fm->dvsize = psize;
4646 set_free_with_pinuse(p, psize, next);
4647 goto postaction;
4648 }
4649 }
4650 else
4651 goto erroraction;
4652 }
4653 } 4854 }
4654 4855 return internal_realloc (ms, oldmem, bytes);
4655 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { 4856 }
4656 if (!cinuse(next)) { /* consolidate forward */ 4857 }
4657 if (next == fm->top) { 4858
4658 size_t tsize = fm->topsize += psize; 4859 void *
4659 fm->top = p; 4860 mspace_memalign (mspace msp, size_t alignment, size_t bytes)
4660 p->head = tsize | PINUSE_BIT; 4861 {
4661 if (p == fm->dv) { 4862 mstate ms = (mstate) msp;
4662 fm->dv = 0; 4863 if (!ok_magic (ms)) {
4663 fm->dvsize = 0; 4864 USAGE_ERROR_ACTION (ms, ms);
4664 } 4865 return 0;
4665 if (should_trim(fm, tsize)) 4866 }
4666 sys_trim(fm, 0); 4867 return internal_memalign (ms, alignment, bytes);
4667 goto postaction; 4868 }
4668 } 4869
4669 else if (next == fm->dv) { 4870 void **
4670 size_t dsize = fm->dvsize += psize; 4871 mspace_independent_calloc (mspace msp, size_t n_elements,
4671 fm->dv = p; 4872 size_t elem_size, void *chunks[])
4672 set_size_and_pinuse_of_free_chunk(p, dsize); 4873 {
4673 goto postaction; 4874 size_t sz = elem_size; /* serves as 1-element array */
4674 } 4875 mstate ms = (mstate) msp;
4675 else { 4876 if (!ok_magic (ms)) {
4676 size_t nsize = chunksize(next); 4877 USAGE_ERROR_ACTION (ms, ms);
4677 psize += nsize; 4878 return 0;
4678 unlink_chunk(fm, next, nsize); 4879 }
4679 set_size_and_pinuse_of_free_chunk(p, psize); 4880 return ialloc (ms, n_elements, &sz, 3, chunks);
4680 if (p == fm->dv) { 4881 }
4681 fm->dvsize = psize; 4882
4682 goto postaction; 4883 void **
4683 } 4884 mspace_independent_comalloc (mspace msp, size_t n_elements,
4684 } 4885 size_t sizes[], void *chunks[])
4685 } 4886 {
4686 else 4887 mstate ms = (mstate) msp;
4687 set_free_with_pinuse(p, psize, next); 4888 if (!ok_magic (ms)) {
4688 insert_chunk(fm, p, psize); 4889 USAGE_ERROR_ACTION (ms, ms);
4689 check_free_chunk(fm, p); 4890 return 0;
4690 goto postaction; 4891 }
4892 return ialloc (ms, n_elements, sizes, 0, chunks);
4893 }
4894
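[Editor's aside] mspace_independent_calloc and mspace_independent_comalloc above simply forward to ialloc; their value to callers is carving several arrays out of one contiguous request. A hedged usage sketch, with a caller-supplied chunks[] array and prototypes assumed from the definitions above:

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    typedef void *mspace;
    extern mspace create_mspace(size_t capacity, int locked);
    extern void **mspace_independent_comalloc(mspace msp, size_t n_elements,
                                              size_t sizes[], void *chunks[]);
    extern size_t destroy_mspace(mspace msp);

    /* Three related arrays allocated together so they sit near each other
       and can be released as a group. */
    int main(void)
    {
        mspace arena = create_mspace(0, 0);
        size_t sizes[3] = { 64 * sizeof(double),   /* samples  */
                            64 * sizeof(int),      /* labels   */
                            256 };                 /* name buf */
        void *chunks[3];

        if (arena == NULL ||
            mspace_independent_comalloc(arena, 3, sizes, chunks) == NULL)
            return 1;

        double *samples = (double *) chunks[0];
        int *labels = (int *) chunks[1];
        char *name = (char *) chunks[2];
        memset(samples, 0, sizes[0]);
        memset(labels, 0, sizes[1]);
        strcpy(name, "demo");
        printf("%s: %p %p %p\n", name, (void *) samples,
               (void *) labels, (void *) name);

        /* Each element is an ordinary chunk and could be freed individually;
           destroying the arena releases everything at once. */
        destroy_mspace(arena);
        return 0;
    }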
4895 int
4896 mspace_trim (mspace msp, size_t pad)
4897 {
4898 int result = 0;
4899 mstate ms = (mstate) msp;
4900 if (ok_magic (ms)) {
4901 if (!PREACTION (ms)) {
4902 result = sys_trim (ms, pad);
4903 POSTACTION (ms);
4691 } 4904 }
4692 } 4905 } else {
4693 erroraction: 4906 USAGE_ERROR_ACTION (ms, ms);
4694 USAGE_ERROR_ACTION(fm, p); 4907 }
4695 postaction: 4908 return result;
4696 POSTACTION(fm); 4909 }
4697 } 4910
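[Editor's aside] mspace_trim, whose reindented body ends just above, hands unused top-chunk space back to the system via sys_trim; together with mspace_footprint / mspace_max_footprint it is the usual way to keep a long-lived private arena from holding its peak memory forever. A usage sketch (prototypes assumed from the definitions in this section):

    #include <stdio.h>
    #include <stddef.h>

    typedef void *mspace;
    extern mspace create_mspace(size_t capacity, int locked);
    extern void *mspace_malloc(mspace msp, size_t bytes);
    extern void mspace_free(mspace msp, void *mem);
    extern int mspace_trim(mspace msp, size_t pad);
    extern size_t mspace_footprint(mspace msp);
    extern size_t mspace_max_footprint(mspace msp);
    extern size_t destroy_mspace(mspace msp);

    int main(void)
    {
        mspace arena = create_mspace(0, 0);
        if (arena == NULL)
            return 1;

        void *big = mspace_malloc(arena, 1 << 20);   /* grow the arena */
        mspace_free(arena, big);

        /* After a burst of frees, ask the space to give memory back,
           keeping at least `pad` bytes of slack in the top chunk. */
        int trimmed = mspace_trim(arena, 64 * 1024);
        printf("trimmed=%d footprint=%lu peak=%lu\n", trimmed,
               (unsigned long) mspace_footprint(arena),
               (unsigned long) mspace_max_footprint(arena));

        destroy_mspace(arena);
        return 0;
    }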
4698 } 4911 void
4699 } 4912 mspace_malloc_stats (mspace msp)
4700 4913 {
4701 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { 4914 mstate ms = (mstate) msp;
4702 void* mem; 4915 if (ok_magic (ms)) {
4703 size_t req = 0; 4916 internal_malloc_stats (ms);
4704 mstate ms = (mstate)msp; 4917 } else {
4705 if (!ok_magic(ms)) { 4918 USAGE_ERROR_ACTION (ms, ms);
4706 USAGE_ERROR_ACTION(ms,ms); 4919 }
4707 return 0; 4920 }
4708 } 4921
4709 if (n_elements != 0) { 4922 size_t
4710 req = n_elements * elem_size; 4923 mspace_footprint (mspace msp)
4711 if (((n_elements | elem_size) & ~(size_t)0xffff) && 4924 {
4712 (req / n_elements != elem_size)) 4925 size_t result;
4713 req = MAX_SIZE_T; /* force downstream failure on overflow */ 4926 mstate ms = (mstate) msp;
4714 } 4927 if (ok_magic (ms)) {
4715 mem = internal_malloc(ms, req); 4928 result = ms->footprint;
4716 if (mem != 0 && calloc_must_clear(mem2chunk(mem))) 4929 }
4717 memset(mem, 0, req); 4930 USAGE_ERROR_ACTION (ms, ms);
4718 return mem; 4931 return result;
4719 } 4932 }
4720 4933
4721 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { 4934
4722 if (oldmem == 0) 4935 size_t
4723 return mspace_malloc(msp, bytes); 4936 mspace_max_footprint (mspace msp)
4724 #ifdef REALLOC_ZERO_BYTES_FREES 4937 {
4725 if (bytes == 0) { 4938 size_t result;
4726 mspace_free(msp, oldmem); 4939 mstate ms = (mstate) msp;
4727 return 0; 4940 if (ok_magic (ms)) {
4728 } 4941 result = ms->max_footprint;
4729 #endif /* REALLOC_ZERO_BYTES_FREES */ 4942 }
4730 else { 4943 USAGE_ERROR_ACTION (ms, ms);
4731 #if FOOTERS 4944 return result;
4732 mchunkptr p = mem2chunk(oldmem);
4733 mstate ms = get_mstate_for(p);
4734 #else /* FOOTERS */
4735 mstate ms = (mstate)msp;
4736 #endif /* FOOTERS */
4737 if (!ok_magic(ms)) {
4738 USAGE_ERROR_ACTION(ms,ms);
4739 return 0;
4740 }
4741 return internal_realloc(ms, oldmem, bytes);
4742 }
4743 }
4744
4745 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4746 mstate ms = (mstate)msp;
4747 if (!ok_magic(ms)) {
4748 USAGE_ERROR_ACTION(ms,ms);
4749 return 0;
4750 }
4751 return internal_memalign(ms, alignment, bytes);
4752 }
4753
4754 void** mspace_independent_calloc(mspace msp, size_t n_elements,
4755 size_t elem_size, void* chunks[]) {
4756 size_t sz = elem_size; /* serves as 1-element array */
4757 mstate ms = (mstate)msp;
4758 if (!ok_magic(ms)) {
4759 USAGE_ERROR_ACTION(ms,ms);
4760 return 0;
4761 }
4762 return ialloc(ms, n_elements, &sz, 3, chunks);
4763 }
4764
4765 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4766 size_t sizes[], void* chunks[]) {
4767 mstate ms = (mstate)msp;
4768 if (!ok_magic(ms)) {
4769 USAGE_ERROR_ACTION(ms,ms);
4770 return 0;
4771 }
4772 return ialloc(ms, n_elements, sizes, 0, chunks);
4773 }
4774
4775 int mspace_trim(mspace msp, size_t pad) {
4776 int result = 0;
4777 mstate ms = (mstate)msp;
4778 if (ok_magic(ms)) {
4779 if (!PREACTION(ms)) {
4780 result = sys_trim(ms, pad);
4781 POSTACTION(ms);
4782 }
4783 }
4784 else {
4785 USAGE_ERROR_ACTION(ms,ms);
4786 }
4787 return result;
4788 }
4789
4790 void mspace_malloc_stats(mspace msp) {
4791 mstate ms = (mstate)msp;
4792 if (ok_magic(ms)) {
4793 internal_malloc_stats(ms);
4794 }
4795 else {
4796 USAGE_ERROR_ACTION(ms,ms);
4797 }
4798 }
4799
4800 size_t mspace_footprint(mspace msp) {
4801 size_t result;
4802 mstate ms = (mstate)msp;
4803 if (ok_magic(ms)) {
4804 result = ms->footprint;
4805 }
4806 USAGE_ERROR_ACTION(ms,ms);
4807 return result;
4808 }
4809
4810
4811 size_t mspace_max_footprint(mspace msp) {
4812 size_t result;
4813 mstate ms = (mstate)msp;
4814 if (ok_magic(ms)) {
4815 result = ms->max_footprint;
4816 }
4817 USAGE_ERROR_ACTION(ms,ms);
4818 return result;
4819 } 4945 }
4820 4946
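[Editor's aside] One thing to flag in the two accessors just above: both mspace_footprint and mspace_max_footprint invoke USAGE_ERROR_ACTION unconditionally (the call sits outside the ok_magic branch, apparently missing an else), and when the magic check fails they return `result` uninitialized. Both columns agree, so this looks inherited from upstream dlmalloc rather than introduced by the reindent. A corrected shape, shown as a sketch using the file's own identifiers (not independently compilable outside this file); mspace_max_footprint would get the identical treatment with ms->max_footprint:

    size_t
    mspace_footprint(mspace msp)
    {
        size_t result = 0;              /* defined even on the error path */
        mstate ms = (mstate) msp;
        if (ok_magic(ms)) {
            result = ms->footprint;
        } else {                        /* restore the missing else */
            USAGE_ERROR_ACTION(ms, ms);
        }
        return result;
    }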
4821 4947
4822 #if !NO_MALLINFO 4948 #if !NO_MALLINFO
4823 struct mallinfo mspace_mallinfo(mspace msp) { 4949 struct mallinfo
4824 mstate ms = (mstate)msp; 4950 mspace_mallinfo (mspace msp)
4825 if (!ok_magic(ms)) { 4951 {
4826 USAGE_ERROR_ACTION(ms,ms); 4952 mstate ms = (mstate) msp;
4827 } 4953 if (!ok_magic (ms)) {
4828 return internal_mallinfo(ms); 4954 USAGE_ERROR_ACTION (ms, ms);
4955 }
4956 return internal_mallinfo (ms);
4829 } 4957 }
4830 #endif /* NO_MALLINFO */ 4958 #endif /* NO_MALLINFO */
4831 4959
4832 int mspace_mallopt(int param_number, int value) { 4960 int
4833 return change_mparam(param_number, value); 4961 mspace_mallopt (int param_number, int value)
4962 {
4963 return change_mparam (param_number, value);
4834 } 4964 }
4835 4965
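[Editor's aside] Despite its prefix, mspace_mallopt above takes no mspace argument: it forwards to change_mparam, which tunes the shared malloc_params, so the setting applies to every space (and the global malloc) in the process. A usage sketch; M_TRIM_THRESHOLD and M_GRANULARITY are assumed to be the dlmalloc mallopt parameter numbers defined earlier in this file, with fallbacks provided only for the sake of a standalone build:

    #include <stdio.h>

    extern int mspace_mallopt(int param_number, int value);

    #ifndef M_TRIM_THRESHOLD
    #define M_TRIM_THRESHOLD (-1)   /* assumed dlmalloc parameter numbers */
    #endif
    #ifndef M_GRANULARITY
    #define M_GRANULARITY (-2)
    #endif

    int main(void)
    {
        /* Process-wide tuning: raise the trim threshold and the segment
           granularity (the latter must be a power of two). */
        int ok_trim = mspace_mallopt(M_TRIM_THRESHOLD, 256 * 1024);
        int ok_gran = mspace_mallopt(M_GRANULARITY, 64 * 1024);
        printf("trim=%d granularity=%d\n", ok_trim, ok_gran);
        return 0;
    }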
4836 #endif /* MSPACES */ 4966 #endif /* MSPACES */
4837 4967
4838 /* -------------------- Alternative MORECORE functions ------------------- */ 4968 /* -------------------- Alternative MORECORE functions ------------------- */
5107 structure of old version, but most details differ.) 5237 structure of old version, but most details differ.)
5108 5238
5109 */ 5239 */
5110 5240
5111 #endif /* !HAVE_MALLOC */ 5241 #endif /* !HAVE_MALLOC */
5242 /* vi: set ts=4 sw=4 expandtab: */