525 #ifndef DLMALLOC_VERSION
526 #define DLMALLOC_VERSION 20806
529 #ifndef DLMALLOC_EXPORT
530 #define DLMALLOC_EXPORT extern
538 #define LACKS_FCNTL_H
543 #define WIN32_LEAN_AND_MEAN
547 #define HAVE_MORECORE 0
548 #define LACKS_UNISTD_H
549 #define LACKS_SYS_PARAM_H
550 #define LACKS_SYS_MMAN_H
551 #define LACKS_STRING_H
552 #define LACKS_STRINGS_H
553 #define LACKS_SYS_TYPES_H
554 #define LACKS_ERRNO_H
555 #define LACKS_SCHED_H
556 #ifndef MALLOC_FAILURE_ACTION
557 #define MALLOC_FAILURE_ACTION
561 #define MMAP_CLEARS 0
563 #define MMAP_CLEARS 1
568 #if defined(DARWIN) || defined(_DARWIN)
570 #ifndef HAVE_MORECORE
571 #define HAVE_MORECORE 0
574 #ifndef MALLOC_ALIGNMENT
575 #define MALLOC_ALIGNMENT ((size_t)16U)
580 #ifndef LACKS_SYS_TYPES_H
581 #include <sys/types.h>
585 #define MAX_SIZE_T (~(size_t)0)
588 #define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
589 (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
593 #if ((defined(__GNUC__) && \
594 ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
595 defined(__i386__) || defined(__x86_64__))) || \
596 (defined(_MSC_VER) && _MSC_VER>=1310))
597 #ifndef USE_SPIN_LOCKS
598 #define USE_SPIN_LOCKS 1
601 #error "USE_SPIN_LOCKS defined without implementation"
603 #elif !defined(USE_SPIN_LOCKS)
604 #define USE_SPIN_LOCKS 0
608 #define ONLY_MSPACES 0
617 #ifndef MALLOC_ALIGNMENT
618 #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
624 #define ABORT abort()
626 #ifndef ABORT_ON_ASSERT_FAILURE
627 #define ABORT_ON_ASSERT_FAILURE 1
629 #ifndef PROCEED_ON_ERROR
630 #define PROCEED_ON_ERROR 0
636 #ifndef MALLOC_INSPECT_ALL
637 #define MALLOC_INSPECT_ALL 0
643 #define MMAP_CLEARS 1
647 #define HAVE_MREMAP 1
650 #define HAVE_MREMAP 0
653 #ifndef MALLOC_FAILURE_ACTION
654 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
656 #ifndef HAVE_MORECORE
658 #define HAVE_MORECORE 0
660 #define HAVE_MORECORE 1
664 #define MORECORE_CONTIGUOUS 0
666 #define MORECORE_DEFAULT sbrk
667 #ifndef MORECORE_CONTIGUOUS
668 #define MORECORE_CONTIGUOUS 1
671 #ifndef DEFAULT_GRANULARITY
672 #if (MORECORE_CONTIGUOUS || defined(WIN32))
673 #define DEFAULT_GRANULARITY (0)
675 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
678 #ifndef DEFAULT_TRIM_THRESHOLD
679 #ifndef MORECORE_CANNOT_TRIM
680 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
682 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
685 #ifndef DEFAULT_MMAP_THRESHOLD
687 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
689 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
692 #ifndef MAX_RELEASE_CHECK_RATE
694 #define MAX_RELEASE_CHECK_RATE 4095
696 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
699 #ifndef USE_BUILTIN_FFS
700 #define USE_BUILTIN_FFS 0
702 #ifndef USE_DEV_RANDOM
703 #define USE_DEV_RANDOM 0
706 #define NO_MALLINFO 0
708 #ifndef MALLINFO_FIELD_TYPE
709 #define MALLINFO_FIELD_TYPE size_t
711 #ifndef NO_MALLOC_STATS
712 #define NO_MALLOC_STATS 0
714 #ifndef NO_SEGMENT_TRAVERSAL
715 #define NO_SEGMENT_TRAVERSAL 0
725 #define M_TRIM_THRESHOLD (-1)
726 #define M_GRANULARITY (-2)
727 #define M_MMAP_THRESHOLD (-3)
756 #ifdef HAVE_USR_INCLUDE_MALLOC_H
757 #include "/usr/include/malloc.h"
759 #ifndef STRUCT_MALLINFO_DECLARED
761 #define _STRUCT_MALLINFO
762 #define STRUCT_MALLINFO_DECLARED 1
785 #if defined(__GNUC__)
786 #define FORCEINLINE __inline __attribute__ ((always_inline))
787 #elif defined(_MSC_VER)
788 #define FORCEINLINE __forceinline
792 #if defined(__GNUC__)
793 #define NOINLINE __attribute__ ((noinline))
794 #elif defined(_MSC_VER)
795 #define NOINLINE __declspec(noinline)
804 #define FORCEINLINE inline
815 #ifndef USE_DL_PREFIX
816 #define dlcalloc calloc
818 #define dlmalloc malloc
819 #define dlmemalign memalign
820 #define dlposix_memalign posix_memalign
821 #define dlrealloc realloc
822 #define dlrealloc_in_place realloc_in_place
823 #define dlvalloc valloc
824 #define dlpvalloc pvalloc
825 #define dlmallinfo mallinfo
826 #define dlmallopt mallopt
827 #define dlmalloc_trim malloc_trim
828 #define dlmalloc_stats malloc_stats
829 #define dlmalloc_usable_size malloc_usable_size
830 #define dlmalloc_footprint malloc_footprint
831 #define dlmalloc_max_footprint malloc_max_footprint
832 #define dlmalloc_footprint_limit malloc_footprint_limit
833 #define dlmalloc_set_footprint_limit malloc_set_footprint_limit
834 #define dlmalloc_inspect_all malloc_inspect_all
835 #define dlindependent_calloc independent_calloc
836 #define dlindependent_comalloc independent_comalloc
837 #define dlbulk_free bulk_free
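/* Illustrative sketch (not part of malloc.c): with USE_DL_PREFIX defined, the
   allocator keeps its dl* names and can coexist with the C library's malloc;
   without it, the #defines above export these entry points under the standard
   names.  A hypothetical caller, e.g. in a separate translation unit where the
   dlmalloc/dlrealloc/dlfree prototypes (declared later in this section) are
   visible: */
static int demo_dl_prefix(void) {
  char* p = (char*)dlmalloc(32);          /* served from dlmalloc's own heap */
  char* q;
  if (p == 0)
    return -1;
  p[0] = 'x';                             /* use the memory */
  q = (char*)dlrealloc(p, 64);            /* may grow in place or move */
  dlfree(q != 0 ? q : p);                 /* free whichever block survives */
  return 0;
}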
1015 #if MALLOC_INSPECT_ALL
1270 typedef void* mspace;
1302 DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
1315 DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);
1343 DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1349 DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1355 DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1361 DLMALLOC_EXPORT int mspace_posix_memalign(mspace msp, void** pp, size_t alignment, size_t bytes);
1367 DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
1368 size_t elem_size, void* chunks[]);
1374 DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1375 size_t sizes[], void* chunks[]);
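/* Illustrative sketch (not part of malloc.c): the usual mspace life cycle when
   the file is built with MSPACES.  create_mspace(0, 0) picks a default initial
   capacity with locking disabled; destroy_mspace releases every segment the
   space ever acquired, so freeing each object individually is optional. */
static int demo_mspace(void) {
  mspace arena = create_mspace(0, 0);     /* default capacity, no locking */
  void* a;
  void* b;
  if (arena == 0)
    return -1;
  a = mspace_malloc(arena, 128);
  b = mspace_malloc(arena, 4096);
  mspace_free(arena, a);                  /* optional: destroy_mspace frees b too */
  (void)b;
  destroy_mspace(arena);                  /* returns total bytes freed to the system */
  return 0;
}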
1439 #pragma warning( disable : 4146 )
1441 #if !NO_MALLOC_STATS
1444 #ifndef LACKS_ERRNO_H
1448 #if ABORT_ON_ASSERT_FAILURE
1450 #define assert(x) if(!(x)) ABORT
1460 #if !defined(WIN32) && !defined(LACKS_TIME_H)
1463 #ifndef LACKS_STDLIB_H
1466 #ifndef LACKS_STRING_H
1470 #ifndef LACKS_STRINGS_H
1471 #include <strings.h>
1475 #ifndef LACKS_SYS_MMAN_H
1477 #if (defined(linux) && !defined(__USE_GNU))
1479 #include <sys/mman.h>
1482 #include <sys/mman.h>
1485 #ifndef LACKS_FCNTL_H
1489 #ifndef LACKS_UNISTD_H
1492 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1493 extern void* sbrk(ptrdiff_t);
1500 #if defined (__SVR4) && defined (__sun)
1502 #elif !defined(LACKS_SCHED_H)
1505 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
1506 #include <pthread.h>
1508 #elif defined(_MSC_VER)
1514 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
1515 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
1520 #pragma intrinsic (_InterlockedCompareExchange)
1521 #pragma intrinsic (_InterlockedExchange)
1522 #define interlockedcompareexchange _InterlockedCompareExchange
1523 #define interlockedexchange _InterlockedExchange
1524 #elif defined(WIN32) && defined(__GNUC__)
1525 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
1526 #define interlockedexchange __sync_lock_test_and_set
1531 #ifndef LOCK_AT_FORK
1532 #define LOCK_AT_FORK 0
1536 #if defined(_MSC_VER) && _MSC_VER>=1300
1537 #ifndef BitScanForward
1541 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1542 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1547 #define BitScanForward _BitScanForward
1548 #define BitScanReverse _BitScanReverse
1549 #pragma intrinsic(_BitScanForward)
1550 #pragma intrinsic(_BitScanReverse)
1555 #ifndef malloc_getpagesize
1556 # ifdef _SC_PAGESIZE
1557 # ifndef _SC_PAGE_SIZE
1558 # define _SC_PAGE_SIZE _SC_PAGESIZE
1561 # ifdef _SC_PAGE_SIZE
1562 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1564 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1565 extern size_t getpagesize();
1566 # define malloc_getpagesize getpagesize()
1569 # define malloc_getpagesize getpagesize()
1571 # ifndef LACKS_SYS_PARAM_H
1572 # include <sys/param.h>
1574 # ifdef EXEC_PAGESIZE
1575 # define malloc_getpagesize EXEC_PAGESIZE
1579 # define malloc_getpagesize NBPG
1581 # define malloc_getpagesize (NBPG * CLSIZE)
1585 # define malloc_getpagesize NBPC
1588 # define malloc_getpagesize PAGESIZE
1590 # define malloc_getpagesize ((size_t)4096U)
1604 #define SIZE_T_SIZE (sizeof(size_t))
1605 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1609 #define SIZE_T_ZERO ((size_t)0)
1610 #define SIZE_T_ONE ((size_t)1)
1611 #define SIZE_T_TWO ((size_t)2)
1612 #define SIZE_T_FOUR ((size_t)4)
1613 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1614 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1615 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1616 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1619 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1622 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1625 #define align_offset(A)\
1626 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1627 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
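/* Illustrative sketch (not part of malloc.c), using only macros defined above:
   align_offset(A) is the number of bytes to add to address A so the result is
   MALLOC_ALIGNMENT-aligned, and it is 0 when A is already aligned. */
static void demo_align_offset(void) {
  char buf[2 * MALLOC_ALIGNMENT];
  char* a = buf + 1;                      /* deliberately misaligned address */
  size_t off = align_offset(a);
  assert(is_aligned(a + off));            /* now MALLOC_ALIGNMENT-aligned */
  assert(off < MALLOC_ALIGNMENT);
}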
1639 #define MFAIL ((void*)(MAX_SIZE_T))
1640 #define CMFAIL ((char*)(MFAIL))
1645 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
1646 #define MMAP_PROT (PROT_READ|PROT_WRITE)
1647 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1648 #define MAP_ANONYMOUS MAP_ANON
1650 #ifdef MAP_ANONYMOUS
1651 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1652 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1658 #define MMAP_FLAGS (MAP_PRIVATE)
1659 static int dev_zero_fd = -1;
1660 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
1661 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1662 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1663 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1666 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
1672 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1673 return (ptr != 0)? ptr: MFAIL;
1677 static FORCEINLINE void* win32direct_mmap(size_t size) {
1678 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1680 return (ptr != 0)? ptr: MFAIL;
1684 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
1685 MEMORY_BASIC_INFORMATION minfo;
1686 char* cptr = (char*)ptr;
1688 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1690 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1691 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1693 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1695 cptr += minfo.RegionSize;
1696 size -= minfo.RegionSize;
1701 #define MMAP_DEFAULT(s) win32mmap(s)
1702 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
1703 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
1709 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1719 #define CALL_MORECORE(S) MORECORE(S)
1721 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
1724 #define CALL_MORECORE(S) MFAIL
1731 #define USE_MMAP_BIT (SIZE_T_ONE)
1734 #define CALL_MMAP(s) MMAP(s)
1736 #define CALL_MMAP(s) MMAP_DEFAULT(s)
1739 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1741 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
1744 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1746 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
1749 #define USE_MMAP_BIT (SIZE_T_ZERO)
1751 #define MMAP(s) MFAIL
1752 #define MUNMAP(a, s) (-1)
1753 #define DIRECT_MMAP(s) MFAIL
1754 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
1755 #define CALL_MMAP(s) MMAP(s)
1756 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
1762 #if HAVE_MMAP && HAVE_MREMAP
1764 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
1766 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
1769 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1773 #define USE_NONCONTIGUOUS_BIT (4U)
1776 #define EXTERN_BIT (8U)
1810 #define USE_LOCK_BIT (0U)
1811 #define INITIAL_LOCK(l) (0)
1812 #define DESTROY_LOCK(l) (0)
1813 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
1814 #define RELEASE_MALLOC_GLOBAL_LOCK()
1827 #elif USE_SPIN_LOCKS
1832 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
1833 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
1834 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
1836 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
1842 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
1844 : "r" (val), "m" (*(sl)), "0"(cmp)
1853 __asm__ __volatile__ ("lock; xchgl %0, %1"
1855 : "m" (*(sl)), "0"(prev)
1859 #define CAS_LOCK(sl) x86_cas_lock(sl)
1860 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
1863 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
1864 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
1869 #define SPINS_PER_YIELD 63
1870 #if defined(_MSC_VER)
1871 #define SLEEP_EX_DURATION 50
1872 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
1873 #elif defined (__SVR4) && defined (__sun)
1874 #define SPIN_LOCK_YIELD thr_yield();
1875 #elif !defined(LACKS_SCHED_H)
1876 #define SPIN_LOCK_YIELD sched_yield();
1878 #define SPIN_LOCK_YIELD
1881 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
1883 static int spin_acquire_lock(int *sl) {
1885 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
1886 if ((++spins & SPINS_PER_YIELD) == 0) {
1894 #define TRY_LOCK(sl) !CAS_LOCK(sl)
1895 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
1896 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
1897 #define INITIAL_LOCK(sl) (*sl = 0)
1898 #define DESTROY_LOCK(sl) (0)
1899 static MLOCK_T malloc_global_mutex = 0;
1904 #define THREAD_ID_T DWORD
1905 #define CURRENT_THREAD GetCurrentThreadId()
1906 #define EQ_OWNER(X,Y) ((X) == (Y))
1913 #define THREAD_ID_T pthread_t
1914 #define CURRENT_THREAD pthread_self()
1915 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
1918 struct malloc_recursive_lock {
1921 THREAD_ID_T threadid;
1924 #define MLOCK_T struct malloc_recursive_lock
1925 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
1930 CLEAR_LOCK(&lk->sl);
1935 THREAD_ID_T mythreadid = CURRENT_THREAD;
1938 if (*((volatile int *)(&lk->sl)) == 0) {
1939 if (!CAS_LOCK(&lk->sl)) {
1940 lk->threadid = mythreadid;
1945 else if (EQ_OWNER(lk->threadid, mythreadid)) {
1949 if ((++spins & SPINS_PER_YIELD) == 0) {
1956 THREAD_ID_T mythreadid = CURRENT_THREAD;
1957 if (*((volatile int *)(&lk->sl)) == 0) {
1958 if (!CAS_LOCK(&lk->sl)) {
1959 lk->threadid = mythreadid;
1964 else if (EQ_OWNER(lk->threadid, mythreadid)) {
1971 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
1972 #define TRY_LOCK(lk) recursive_try_lock(lk)
1973 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
1974 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
1975 #define DESTROY_LOCK(lk) (0)
1978 #elif defined(WIN32)
1979 #define MLOCK_T CRITICAL_SECTION
1980 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
1981 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
1982 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
1983 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
1984 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
1985 #define NEED_GLOBAL_LOCK_INIT
1987 static MLOCK_T malloc_global_mutex;
1988 static volatile LONG malloc_global_mutex_status;
1991 static void init_malloc_global_mutex() {
1993 long stat = malloc_global_mutex_status;
1998 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
1999 InitializeCriticalSection(&malloc_global_mutex);
2000 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
2008 #define MLOCK_T pthread_mutex_t
2009 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
2010 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
2011 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
2012 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
2013 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
2015 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
2018 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
2020 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
2021 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
2024 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
2026 static int pthread_init_lock (MLOCK_T *lk) {
2027 pthread_mutexattr_t attr;
2028 if (pthread_mutexattr_init(&attr)) return 1;
2029 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
2030 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
2032 if (pthread_mutex_init(lk, &attr)) return 1;
2033 if (pthread_mutexattr_destroy(&attr)) return 1;
2040 #define USE_LOCK_BIT (2U)
2042 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
2043 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
2046 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
2047 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
2205 #define MCHUNK_SIZE (sizeof(mchunk))
2208 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2210 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
2214 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
2216 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
2219 #define MIN_CHUNK_SIZE\
2220 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2223 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
2224 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
2226 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
2229 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
2230 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
2233 #define pad_request(req) \
2234 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
2237 #define request2size(req) \
2238 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
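/* Illustrative sketch (not part of malloc.c), using only macros defined above:
   request2size rounds a user request up to a usable chunk size -- at least
   MIN_CHUNK_SIZE, otherwise the request plus CHUNK_OVERHEAD rounded up to the
   chunk alignment. */
static void demo_request2size(void) {
  size_t tiny  = request2size(1);         /* clamps to the minimum chunk size */
  size_t small = request2size(100);       /* 100 + overhead, alignment-rounded */
  assert(tiny == MIN_CHUNK_SIZE);
  assert(small >= 100 + CHUNK_OVERHEAD);
  assert((small & CHUNK_ALIGN_MASK) == 0);
}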
2251 #define PINUSE_BIT (SIZE_T_ONE)
2252 #define CINUSE_BIT (SIZE_T_TWO)
2253 #define FLAG4_BIT (SIZE_T_FOUR)
2254 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
2255 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
2258 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
2261 #define cinuse(p) ((p)->head & CINUSE_BIT)
2262 #define pinuse(p) ((p)->head & PINUSE_BIT)
2263 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
2264 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
2265 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
2267 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
2269 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
2270 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
2271 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
2274 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2275 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2278 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2279 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2282 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
2285 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2286 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2289 #define set_size_and_pinuse_of_free_chunk(p, s)\
2290 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2293 #define set_free_with_pinuse(p, s, n)\
2294 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2297 #define overhead_for(p)\
2298 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2302 #define calloc_must_clear(p) (!is_mmapped(p))
2304 #define calloc_must_clear(p) (1)
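/* Illustrative sketch (not part of malloc.c): the low bits of a chunk's head
   field carry its status -- PINUSE_BIT means the previous chunk is in use,
   CINUSE_BIT means this one is -- and chunksize() masks the flags back off.
   A hypothetical check on a fabricated header (assumes struct malloc_chunk
   defined earlier in this file): */
static void demo_head_bits(void) {
  struct malloc_chunk c;
  c.head = (size_t)256 | PINUSE_BIT | CINUSE_BIT;   /* 256-byte in-use chunk */
  assert(chunksize(&c) == 256);
  assert(cinuse(&c) && pinuse(&c) && is_inuse(&c));
  assert(!is_mmapped(&c));                /* mmapped chunks have both bits clear */
}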
2415 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
2481 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
2482 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2575 #define NSMALLBINS (32U)
2576 #define NTREEBINS (32U)
2577 #define SMALLBIN_SHIFT (3U)
2578 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2579 #define TREEBIN_SHIFT (8U)
2580 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2581 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2582 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2632 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
2639 #define is_global(M) ((M) == &_gm_)
2643 #define is_initialized(M) ((M)->top != 0)
2649 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2650 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2652 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2654 #define disable_lock(M)
2657 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2658 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2660 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2662 #define disable_mmap(M)
2665 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2666 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2668 #define set_lock(M,L)\
2669 ((M)->mflags = (L)?\
2670 ((M)->mflags | USE_LOCK_BIT) :\
2671 ((M)->mflags & ~USE_LOCK_BIT))
2674 #define page_align(S)\
2675 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2678 #define granularity_align(S)\
2679 (((S) + (mparams.granularity - SIZE_T_ONE))\
2680 & ~(mparams.granularity - SIZE_T_ONE))
2685 #define mmap_align(S) granularity_align(S)
2687 #define mmap_align(S) page_align(S)
2691 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
2693 #define is_page_aligned(S)\
2694 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2695 #define is_granularity_aligned(S)\
2696 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2699 #define segment_holds(S, A)\
2700 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2703 static msegmentptr segment_holding(mstate m, char* addr) {
2704 msegmentptr sp = &m->seg;
2706 if (addr >= sp->base && addr < sp->base + sp->size)
2708 if ((sp = sp->next) == 0)
2714 static int has_segment_link(mstate m, msegmentptr ss) {
2715 msegmentptr sp = &m->seg;
2717 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2719 if ((sp = sp->next) == 0)
2724 #ifndef MORECORE_CANNOT_TRIM
2725 #define should_trim(M,s) ((s) > (M)->trim_check)
2727 #define should_trim(M,s) (0)
2735 #define TOP_FOOT_SIZE\
2736 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2748 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2749 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2753 #define PREACTION(M) (0)
2757 #define POSTACTION(M)
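/* Illustrative sketch (not part of malloc.c): each public routine brackets its
   work with PREACTION/POSTACTION, so the same body runs under the mstate lock
   when USE_LOCKS is configured and without any locking overhead otherwise.
   A hypothetical operation written in that style: */
static size_t demo_locked_query(mstate m) {
  size_t result = 0;
  if (!PREACTION(m)) {          /* acquires m's lock only when locking is enabled */
    result = m->topsize;        /* ...any work on the malloc_state... */
    POSTACTION(m);              /* releases the lock in the locked configuration */
  }
  return result;
}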
2770 #if PROCEED_ON_ERROR
2773 int malloc_corruption_error_count;
2776 static void reset_on_error(mstate m);
2778 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2779 #define USAGE_ERROR_ACTION(m, p)
2783 #ifndef CORRUPTION_ERROR_ACTION
2784 #define CORRUPTION_ERROR_ACTION(m) ABORT
2787 #ifndef USAGE_ERROR_ACTION
2788 #define USAGE_ERROR_ACTION(m,p) ABORT
2798 #define check_free_chunk(M,P)
2799 #define check_inuse_chunk(M,P)
2800 #define check_malloced_chunk(M,P,N)
2801 #define check_mmapped_chunk(M,P)
2802 #define check_malloc_state(M)
2803 #define check_top_chunk(M,P)
2806 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2807 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2808 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2809 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2810 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2811 #define check_malloc_state(M) do_check_malloc_state(M)
2813 static void do_check_any_chunk(mstate m, mchunkptr p);
2814 static void do_check_top_chunk(mstate m, mchunkptr p);
2815 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2816 static void do_check_inuse_chunk(mstate m, mchunkptr p);
2817 static void do_check_free_chunk(mstate m, mchunkptr p);
2818 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
2819 static void do_check_tree(mstate m, tchunkptr t);
2820 static void do_check_treebin(mstate m, bindex_t i);
2821 static void do_check_smallbin(mstate m, bindex_t i);
2822 static void do_check_malloc_state(mstate m);
2823 static int bin_find(mstate m, mchunkptr x);
2824 static size_t traverse_and_check(mstate m);
2829 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2830 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
2831 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2832 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
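/* Illustrative sketch (not part of malloc.c): small chunk sizes map onto one of
   NSMALLBINS exact-size bins spaced SMALLBIN_WIDTH (8) bytes apart, so
   small_index and small_index2size are inverses over that range. */
static void demo_small_bins(void) {
  size_t sz = 40;                         /* an exact small-bin size: 5 * 8 */
  bindex_t i = small_index(sz);           /* 40 >> 3 == 5 */
  assert(is_small(sz));
  assert(small_index2size(i) == sz);
  assert(!is_small(MIN_LARGE_SIZE));      /* 256 and up go to the tree bins */
}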
2835 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2836 #define treebin_at(M,i) (&((M)->treebins[i]))
2839 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2840 #define compute_tree_index(S, I)\
2842 unsigned int X = S >> TREEBIN_SHIFT;\
2845 else if (X > 0xFFFF)\
2848 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
2849 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2853 #elif defined (__INTEL_COMPILER)
2854 #define compute_tree_index(S, I)\
2856 size_t X = S >> TREEBIN_SHIFT;\
2859 else if (X > 0xFFFF)\
2862 unsigned int K = _bit_scan_reverse (X); \
2863 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2867 #elif defined(_MSC_VER) && _MSC_VER>=1300
2868 #define compute_tree_index(S, I)\
2870 size_t X = S >> TREEBIN_SHIFT;\
2873 else if (X > 0xFFFF)\
2877 _BitScanReverse((DWORD *) &K, (DWORD) X);\
2878 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2883 #define compute_tree_index(S, I)\
2885 size_t X = S >> TREEBIN_SHIFT;\
2888 else if (X > 0xFFFF)\
2891 unsigned int Y = (unsigned int)X;\
2892 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2893 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2895 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2896 K = 14 - N + ((Y <<= K) >> 15);\
2897 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2903 #define bit_for_tree_index(i) \
2904 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2907 #define leftshift_for_tree_index(i) \
2908 ((i == NTREEBINS-1)? 0 : \
2909 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2912 #define minsize_for_tree_index(i) \
2913 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2914 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
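/* Illustrative sketch (not part of malloc.c): compute_tree_index maps a large
   size to one of NTREEBINS bins, two bins per power-of-two range above
   TREEBIN_SHIFT, and minsize_for_tree_index gives each bin's smallest member.
   A hypothetical round-trip check: */
static void demo_tree_index(void) {
  size_t sz = 768;                        /* a "large" request (>= MIN_LARGE_SIZE) */
  bindex_t i;
  compute_tree_index(sz, i);
  assert(minsize_for_tree_index(i) <= sz);
  assert(i + 1 == NTREEBINS || sz < minsize_for_tree_index(i + 1));
}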
2920 #define idx2bit(i) ((binmap_t)(1) << (i))
2923 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2924 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2925 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2927 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2928 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2929 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2932 #define least_bit(x) ((x) & -(x))
2935 #define left_bits(x) ((x<<1) | -(x<<1))
2938 #define same_or_left_bits(x) ((x) | -(x))
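/* Illustrative sketch (not part of malloc.c): the smallmap/treemap bitmaps are
   searched with these helpers -- left_bits keeps the bins strictly above a
   given bin's bit, least_bit isolates the lowest bit set -- to find the
   smallest usable non-empty bin. */
static void demo_binmap_scan(void) {
  binmap_t map = idx2bit(3) | idx2bit(7); /* suppose bins 3 and 7 are non-empty */
  binmap_t above = left_bits(idx2bit(4)); /* mask of all bins above bin 4 */
  binmap_t pick = least_bit(map & above); /* smallest such non-empty bin */
  assert(pick == idx2bit(7));
}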
2942 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2943 #define compute_bit2idx(X, I)\
2946 J = __builtin_ctz(X); \
2950 #elif defined (__INTEL_COMPILER)
2951 #define compute_bit2idx(X, I)\
2954 J = _bit_scan_forward (X); \
2958 #elif defined(_MSC_VER) && _MSC_VER>=1300
2959 #define compute_bit2idx(X, I)\
2962 _BitScanForward((DWORD *) &J, X);\
2966 #elif USE_BUILTIN_FFS
2967 #define compute_bit2idx(X, I) I = ffs(X)-1
2970 #define compute_bit2idx(X, I)\
2972 unsigned int Y = X - 1;\
2973 unsigned int K = Y >> (16-4) & 16;\
2974 unsigned int N = K; Y >>= K;\
2975 N += K = Y >> (8-3) & 8; Y >>= K;\
2976 N += K = Y >> (4-2) & 4; Y >>= K;\
2977 N += K = Y >> (2-1) & 2; Y >>= K;\
2978 N += K = Y >> (1-0) & 1; Y >>= K;\
2979 I = (bindex_t)(N + Y);\
3014 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
3016 #define ok_next(p, n) ((char*)(p) < (char*)(n))
3018 #define ok_inuse(p) is_inuse(p)
3020 #define ok_pinuse(p) pinuse(p)
3023 #define ok_address(M, a) (1)
3024 #define ok_next(b, n) (1)
3025 #define ok_inuse(p) (1)
3026 #define ok_pinuse(p) (1)
3029 #if (FOOTERS && !INSECURE)
3031 #define ok_magic(M) ((M)->magic == mparams.magic)
3033 #define ok_magic(M) (1)
3038 #if defined(__GNUC__) && __GNUC__ >= 3
3039 #define RTCHECK(e) __builtin_expect(e, 1)
3041 #define RTCHECK(e) (e)
3044 #define RTCHECK(e) (1)
3051 #define mark_inuse_foot(M,p,s)
3056 #define set_inuse(M,p,s)\
3057 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3058 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3061 #define set_inuse_and_pinuse(M,p,s)\
3062 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3063 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
3066 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3067 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
3072 #define mark_inuse_foot(M,p,s)\
3073 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
3075 #define get_mstate_for(p)\
3076 ((mstate)(((mchunkptr)((char*)(p) +\
3077 (chunksize(p))))->prev_foot ^ mparams.magic))
3079 #define set_inuse(M,p,s)\
3080 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
3081 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
3082 mark_inuse_foot(M,p,s))
3084 #define set_inuse_and_pinuse(M,p,s)\
3085 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3086 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
3087 mark_inuse_foot(M,p,s))
3089 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
3090 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
3091 mark_inuse_foot(M, p, s))
3104 static int init_mparams(void) {
3105 #ifdef NEED_GLOBAL_LOCK_INIT
3106 if (malloc_global_mutex_status <= 0)
3107 init_malloc_global_mutex();
3111 if (mparams.magic == 0) {
3121 SYSTEM_INFO system_info;
3122 GetSystemInfo(&system_info);
3123 psize = system_info.dwPageSize;
3135 if ((sizeof(size_t) != sizeof(char*)) ||
3137 (sizeof(int) < 4) ||
3148 #if MORECORE_CONTIGUOUS
3160 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
3166 unsigned char buf[sizeof(size_t)];
3168 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
3169 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
3170 magic = *((size_t *) buf);
3176 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
3177 #elif defined(LACKS_TIME_H)
3178 magic = (size_t)&magic ^ (size_t)0x55555555U;
3180 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
3182 magic |= (size_t)8U;
3183 magic &= ~(size_t)7U;
3185 (*(volatile size_t *)(&(mparams.magic))) = magic;
3194 static int change_mparam(int param_number, int value) {
3197 val = (value == -1)? MAX_SIZE_T : (size_t)value;
3198 switch(param_number) {
3203 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
3221 static void do_check_any_chunk(mstate m, mchunkptr p) {
3227 static void do_check_top_chunk(mstate m, mchunkptr p) {
3228 msegmentptr sp = segment_holding(m, (char*)p);
3241 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
3255 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
3256 do_check_any_chunk(m, p);
3262 do_check_mmapped_chunk(m, p);
3266 static void do_check_free_chunk(mstate m, mchunkptr p) {
3269 do_check_any_chunk(m, p);
3273 if (p != m->dv && p != m->top) {
3289 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
3293 do_check_inuse_chunk(m, p);
3303 static void do_check_tree(mstate m, tchunkptr t) {
3306 bindex_t tindex = t->index;
3316 do_check_any_chunk(m, ((mchunkptr)u));
3333 *((tbinptr*)(u->parent)) == u);
3334 if (u->child[0] != 0) {
3337 do_check_tree(m, u->child[0]);
3339 if (u->child[1] != 0) {
3342 do_check_tree(m, u->child[1]);
3354 static void do_check_treebin(mstate m, bindex_t i) {
3357 int empty = (m->treemap & (1U << i)) == 0;
3361 do_check_tree(m, t);
3365 static void do_check_smallbin(mstate m, bindex_t i) {
3367 mchunkptr p = b->bk;
3368 unsigned int empty = (m->smallmap & (1U << i)) == 0;
3372 for (; p != b; p = p->bk) {
3376 do_check_free_chunk(m, p);
3383 do_check_inuse_chunk(m, q);
3389 static int bin_find(mstate m, mchunkptr x) {
3399 } while ((p = p->fd) != b);
3408 while (t != 0 && chunksize(t) != size) {
3415 if (u == (tchunkptr)x)
3417 } while ((u = u->fd) != t);
3425 static size_t traverse_and_check(mstate m) {
3428 msegmentptr s = &m->seg;
3432 mchunkptr lastq = 0;
3439 do_check_inuse_chunk(m, q);
3442 assert(q == m->dv || bin_find(m, q));
3444 do_check_free_chunk(m, q);
3457 static void do_check_malloc_state(mstate m) {
3462 do_check_smallbin(m, i);
3464 do_check_treebin(m, i);
3467 do_check_any_chunk(m, m->dv);
3474 do_check_top_chunk(m, m->top);
3480 total = traverse_and_check(m);
3489 static struct mallinfo internal_mallinfo(mstate m) {
3490 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3498 msegmentptr s = &m->seg;
3529 #if !NO_MALLOC_STATS
3530 static void internal_malloc_stats(mstate m) {
3538 msegmentptr s = &m->seg;
3555 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
3556 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
3557 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
3572 #define insert_small_chunk(M, P, S) {\
3573 bindex_t I = small_index(S);\
3574 mchunkptr B = smallbin_at(M, I);\
3576 assert(S >= MIN_CHUNK_SIZE);\
3577 if (!smallmap_is_marked(M, I))\
3578 mark_smallmap(M, I);\
3579 else if (RTCHECK(ok_address(M, B->fd)))\
3582 CORRUPTION_ERROR_ACTION(M);\
3591 #define unlink_small_chunk(M, P, S) {\
3592 mchunkptr F = P->fd;\
3593 mchunkptr B = P->bk;\
3594 bindex_t I = small_index(S);\
3597 assert(chunksize(P) == small_index2size(I));\
3598 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
3600 clear_smallmap(M, I);\
3602 else if (RTCHECK(B == smallbin_at(M,I) ||\
3603 (ok_address(M, B) && B->fd == P))) {\
3608 CORRUPTION_ERROR_ACTION(M);\
3612 CORRUPTION_ERROR_ACTION(M);\
3617 #define unlink_first_small_chunk(M, B, P, I) {\
3618 mchunkptr F = P->fd;\
3621 assert(chunksize(P) == small_index2size(I));\
3623 clear_smallmap(M, I);\
3625 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
3630 CORRUPTION_ERROR_ACTION(M);\
3636 #define replace_dv(M, P, S) {\
3637 size_t DVS = M->dvsize;\
3638 assert(is_small(DVS));\
3640 mchunkptr DV = M->dv;\
3641 insert_small_chunk(M, DV, DVS);\
3650 #define insert_large_chunk(M, X, S) {\
3653 compute_tree_index(S, I);\
3654 H = treebin_at(M, I);\
3656 X->child[0] = X->child[1] = 0;\
3657 if (!treemap_is_marked(M, I)) {\
3658 mark_treemap(M, I);\
3660 X->parent = (tchunkptr)H;\
3665 size_t K = S << leftshift_for_tree_index(I);\
3667 if (chunksize(T) != S) {\
3668 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3672 else if (RTCHECK(ok_address(M, C))) {\
3679 CORRUPTION_ERROR_ACTION(M);\
3684 tchunkptr F = T->fd;\
3685 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3693 CORRUPTION_ERROR_ACTION(M);\
3718 #define unlink_large_chunk(M, X) {\
3719 tchunkptr XP = X->parent;\
3722 tchunkptr F = X->fd;\
3724 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
3729 CORRUPTION_ERROR_ACTION(M);\
3734 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3735 ((R = *(RP = &(X->child[0]))) != 0)) {\
3737 while ((*(CP = &(R->child[1])) != 0) ||\
3738 (*(CP = &(R->child[0])) != 0)) {\
3741 if (RTCHECK(ok_address(M, RP)))\
3744 CORRUPTION_ERROR_ACTION(M);\
3749 tbinptr* H = treebin_at(M, X->index);\
3751 if ((*H = R) == 0) \
3752 clear_treemap(M, X->index);\
3754 else if (RTCHECK(ok_address(M, XP))) {\
3755 if (XP->child[0] == X) \
3761 CORRUPTION_ERROR_ACTION(M);\
3763 if (RTCHECK(ok_address(M, R))) {\
3766 if ((C0 = X->child[0]) != 0) {\
3767 if (RTCHECK(ok_address(M, C0))) {\
3772 CORRUPTION_ERROR_ACTION(M);\
3774 if ((C1 = X->child[1]) != 0) {\
3775 if (RTCHECK(ok_address(M, C1))) {\
3780 CORRUPTION_ERROR_ACTION(M);\
3784 CORRUPTION_ERROR_ACTION(M);\
3791 #define insert_chunk(M, P, S)\
3792 if (is_small(S)) insert_small_chunk(M, P, S)\
3793 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3795 #define unlink_chunk(M, P, S)\
3796 if (is_small(S)) unlink_small_chunk(M, P, S)\
3797 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3803 #define internal_malloc(m, b) mspace_malloc(m, b)
3804 #define internal_free(m, mem) mspace_free(m,mem);
3807 #define internal_malloc(m, b)\
3808 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
3809 #define internal_free(m, mem)\
3810 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3812 #define internal_malloc(m, b) dlmalloc(b)
3813 #define internal_free(m, mem) dlfree(mem)
3828 static void* mmap_alloc(mstate m, size_t nb) {
3847 if (m->least_addr == 0 || mm < m->least_addr)
3860 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
3873 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3874 oldmmsize, newmmsize, flags);
3876 mchunkptr newp = (mchunkptr)(cp + offset);
3883 if (cp < m->least_addr)
3898 static void init_top(mstate m, mchunkptr p, size_t psize) {
3913 static void init_bins(mstate m) {
3918 bin->fd = bin->bk = bin;
3922 #if PROCEED_ON_ERROR
3925 static void reset_on_error(mstate m) {
3927 ++malloc_corruption_error_count;
3942 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
3946 size_t psize = (char*)oldfirst - (char*)p;
3948 size_t qsize = psize - nb;
3951 assert((char*)oldfirst > (char*)q);
3956 if (oldfirst == m->top) {
3957 size_t tsize = m->topsize += qsize;
3962 else if (oldfirst == m->dv) {
3963 size_t dsize = m->dvsize += qsize;
3984 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
3986 char* old_top = (char*)m->top;
3987 msegmentptr oldsp = segment_holding(m, old_top);
3988 char* old_end = oldsp->base + oldsp->size;
3992 char* asp = rawsp + offset;
3997 mchunkptr p = tnext;
4017 if ((char*)(&(nextp->head)) < old_end)
4025 if (csp != old_top) {
4027 size_t psize = csp - old_top;
4039 static void* sys_alloc(mstate m, size_t nb) {
4042 flag_t mmap_flag = 0;
4049 void* mem = mmap_alloc(m, nb);
4087 size_t ssize = asize;
4088 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
4097 ssize += (page_align((size_t)base) - (size_t)base);
4101 (fp > m->footprint && fp <= m->footprint_limit)) &&
4164 size_t ssize = end - br;
4179 if (m->least_addr == 0 || tbase < m->least_addr)
4195 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4201 msegmentptr sp = &m->seg;
4203 while (sp != 0 && tbase != sp->base + sp->size)
4213 if (tbase < m->least_addr)
4216 while (sp != 0 && sp->base != tbase + tsize)
4221 char* oldbase = sp->base;
4224 return prepend_alloc(m, tbase, oldbase, nb);
4227 add_segment(m, tbase, tsize, mmap_flag);
4231 if (nb < m->topsize) {
4232 size_t rsize = m->topsize -= nb;
4233 mchunkptr p = m->top;
4250 static size_t release_unused_segments(mstate m) {
4251 size_t released = 0;
4253 msegmentptr pred = &m->seg;
4254 msegmentptr sp = pred->next;
4256 char* base = sp->base;
4257 size_t size = sp->size;
4258 msegmentptr next = sp->next;
4297 static int sys_trim(mstate m, size_t pad) {
4298 size_t released = 0;
4308 msegmentptr sp = segment_holding(m, (char*)m->top);
4313 sp->size >= extra &&
4314 !has_segment_link(m, sp)) {
4315 size_t newsize = sp->size - extra;
4331 if (old_br == sp->base + sp->size) {
4334 if (rel_br != CMFAIL && new_br < old_br)
4335 released = old_br - new_br;
4342 if (released != 0) {
4343 sp->size -= released;
4352 released += release_unused_segments(m);
4359 return (released != 0)? 1 : 0;
4365 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
4396 if (next == m->top) {
4397 size_t tsize = m->topsize += psize;
4406 else if (next == m->dv) {
4407 size_t dsize = m->dvsize += psize;
4436 static void* tmalloc_large(mstate m, size_t nb) {
4451 if ((rsize = trem) == 0)
4456 if (rt != 0 && rt != t)
4465 if (t == 0 && v == 0) {
4467 if (leftbits != 0) {
4469 binmap_t leastbit = least_bit(leftbits);
4485 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
4507 static void* tmalloc_small(mstate m, size_t nb) {
4582 smallbits = gm->smallmap >> idx;
4584 if ((smallbits & 0x3U) != 0) {
4586 idx += ~smallbits & 1;
4597 else if (nb > gm->dvsize) {
4598 if (smallbits != 0) {
4603 binmap_t leastbit = least_bit(leftbits);
4624 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4634 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4640 if (nb <= gm->dvsize) {
4641 size_t rsize = gm->dvsize - nb;
4642 mchunkptr p = gm->dv;
4650 size_t dvs = gm->dvsize;
4660 else if (nb < gm->topsize) {
4661 size_t rsize = gm->topsize -= nb;
4662 mchunkptr p = gm->top;
4672 mem = sys_alloc(gm, nb);
4694 mstate fm = get_mstate_for(p);
4736 if (next == fm->top) {
4737 size_t tsize = fm->topsize += psize;
4748 else if (next == fm->dv) {
4749 size_t dsize = fm->dvsize += psize;
4777 release_unused_segments(fm);
4796 if (n_elements != 0) {
4797 req = n_elements * elem_size;
4798 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4799 (req / n_elements != elem_size))
4804 memset(mem, 0, req);
4813 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
4821 newp = mmap_resize(m, p, nb, can_move);
4823 else if (oldsize >= nb) {
4824 size_t rsize = oldsize - nb;
4829 dispose_chunk(m, r, rsize);
4833 else if (next == m->top) {
4834 if (oldsize + m->topsize > nb) {
4835 size_t newsize = oldsize + m->topsize;
4836 size_t newtopsize = newsize - nb;
4845 else if (next == m->dv) {
4847 if (oldsize + dvs >= nb) {
4848 size_t dsize = oldsize + dvs - nb;
4859 size_t newsize = oldsize + dvs;
4867 else if (!cinuse(next)) {
4869 if (oldsize + nextsize >= nb) {
4870 size_t rsize = oldsize + nextsize - nb;
4873 size_t newsize = oldsize + nextsize;
4880 dispose_chunk(m, r, rsize);
4892 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4896 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
4898 while (a < alignment) a <<= 1;
4914 if ((((size_t)(mem)) & (alignment - 1)) != 0) {
4923 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
4929 size_t leadsize = pos - (char*)(p);
4930 size_t newsize = chunksize(p) - leadsize;
4934 newp->head = newsize;
4939 dispose_chunk(m, p, leadsize);
4948 size_t remainder_size = size - nb;
4951 set_inuse(m, remainder, remainder_size);
4952 dispose_chunk(m, remainder, remainder_size);
4958 assert(((size_t)mem & (alignment - 1)) == 0);
4973 static void** ialloc(mstate m,
4979 size_t element_size;
4980 size_t contents_size;
4984 size_t remainder_size;
4986 mchunkptr array_chunk;
4994 if (n_elements == 0)
5001 if (n_elements == 0)
5004 array_size = request2size(n_elements * (sizeof(void*)));
5010 contents_size = n_elements * element_size;
5015 for (i = 0; i != n_elements; ++i)
5019 size = contents_size + array_size;
5041 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
5046 size_t array_chunk_size;
5048 array_chunk_size = remainder_size - contents_size;
5049 marray = (void**) (chunk2mem(array_chunk));
5051 remainder_size = contents_size;
5055 for (i = 0; ; ++i) {
5057 if (i != n_elements-1) {
5058 if (element_size != 0)
5059 size = element_size;
5062 remainder_size -= size;
5073 if (marray != chunks) {
5075 if (element_size != 0) {
5076 assert(remainder_size == element_size);
5083 for (i = 0; i != n_elements; ++i)
5099 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
5103 void** fence = &(array[nelem]);
5104 for (a = array; a != fence; ++a) {
5110 if (get_mstate_for(p) != m) {
5120 if (b != fence && *b == chunk2mem(next)) {
5121 size_t newsize = chunksize(next) + psize;
5126 dispose_chunk(m, p, psize);
5142 #if MALLOC_INSPECT_ALL
5143 static void internal_inspect_all(mstate m,
5144 void(*handler)(void *start,
5147 void* callback_arg),
5150 mchunkptr top = m->top;
5152 for (s = &m->seg; s != 0; s = s->next) {
5166 start = (void*)((char*)q + sizeof(struct malloc_chunk));
5172 if (start < (void*)next)
5173 handler(start, next, used, arg);
5195 #ifdef REALLOC_ZERO_BYTES_FREES
5196 else if (bytes == 0) {
5206 mstate m = get_mstate_for(oldp);
5213 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5223 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5244 mstate m = get_mstate_for(oldp);
5251 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5267 return internal_memalign(gm, alignment, bytes);
5275 size_t d = alignment / sizeof(void*);
5276 size_t r = alignment % sizeof(void*);
5277 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
5282 mem = internal_memalign(gm, alignment, bytes);
5309 size_t sz = elem_size;
5310 return ialloc(gm, n_elements, &sz, 3, chunks);
5315 return ialloc(gm, n_elements, sizes, 0, chunks);
5319 return internal_bulk_free(gm, array, nelem);
5322 #if MALLOC_INSPECT_ALL
5326 void* callback_arg),
5330 internal_inspect_all(gm, handler, arg);
5340 result = sys_trim(gm, pad);
5347 return gm->footprint;
5351 return gm->max_footprint;
5355 size_t maf = gm->footprint_limit;
5367 return gm->footprint_limit = result;
5372 return internal_mallinfo(gm);
5376 #if !NO_MALLOC_STATS
5378 internal_malloc_stats(gm);
5383 return change_mparam(param_number, value);
5401 static mstate init_user_mstate(char* tbase, size_t tsize) {
5406 memset(m, 0, msize);
5419 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
5424 mspace create_mspace(size_t capacity, int locked) {
5430 size_t rs = ((capacity == 0)? mparams.granularity :
5433 char* tbase = (char*)(CALL_MMAP(tsize));
5435 m = init_user_mstate(tbase, tsize);
5443 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
5450 m = init_user_mstate((char*)base, capacity);
5457 int mspace_track_large_chunks(mspace msp, int enable) {
5474 size_t destroy_mspace(mspace msp) {
5478 msegmentptr sp = &ms->seg;
5481 char* base = sp->base;
5482 size_t size = sp->size;
5483 flag_t flag = sp->sflags;
5502 void* mspace_malloc(mspace msp, size_t bytes) {
5518 if ((smallbits & 0x3U) != 0) {
5520 idx += ~smallbits & 1;
5531 else if (nb > ms->dvsize) {
5532 if (smallbits != 0) {
5537 binmap_t leastbit = least_bit(leftbits);
5558 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
5568 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
5574 if (nb <= ms->dvsize) {
5575 size_t rsize = ms->dvsize - nb;
5576 mchunkptr p = ms->dv;
5594 else if (nb < ms->topsize) {
5595 size_t rsize = ms->topsize -= nb;
5596 mchunkptr p = ms->top;
5606 mem = sys_alloc(ms, nb);
5616 void mspace_free(mspace msp, void* mem) {
5620 mstate fm = get_mstate_for(p);
5663 if (next == fm->top) {
5664 size_t tsize = fm->topsize += psize;
5675 else if (next == fm->dv) {
5676 size_t dsize = fm->dvsize += psize;
5704 release_unused_segments(fm);
5717 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5725 if (n_elements != 0) {
5726 req = n_elements * elem_size;
5727 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5728 (req / n_elements != elem_size))
5733 memset(mem, 0, req);
5737 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5740 mem = mspace_malloc(msp, bytes);
5745 #ifdef REALLOC_ZERO_BYTES_FREES
5746 else if (bytes == 0) {
5747 mspace_free(msp, oldmem);
5756 mstate m = get_mstate_for(oldp);
5763 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
5770 mem = mspace_malloc(m, bytes);
5773 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
5774 mspace_free(m, oldmem);
5782 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
5794 mstate m = get_mstate_for(oldp);
5802 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
5814 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5821 return mspace_malloc(msp, bytes);
5822 return internal_memalign(ms, alignment, bytes);
5825 int mspace_posix_memalign(mspace msp, void** pp, size_t alignment, size_t bytes) {
5829 mem = mspace_malloc(msp, bytes);
5831 size_t d = alignment / sizeof(void*);
5832 size_t r = alignment % sizeof(void*);
5833 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
5838 mem = internal_memalign(ms, alignment, bytes);
5849 void** mspace_independent_calloc(mspace msp, size_t n_elements,
5850 size_t elem_size, void* chunks[]) {
5851 size_t sz = elem_size;
5857 return ialloc(ms, n_elements, &sz, 3, chunks);
5860 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
5861 size_t sizes[], void* chunks[]) {
5867 return ialloc(ms, n_elements, sizes, 0, chunks);
5870 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
5871 return internal_bulk_free((mstate)msp, array, nelem);
5874 #if MALLOC_INSPECT_ALL
5875 void mspace_inspect_all(mspace msp,
5876 void(*handler)(void *start,
5879 void* callback_arg),
5884 internal_inspect_all(ms, handler, arg);
5894 int mspace_trim(mspace msp, size_t pad) {
5899 result = sys_trim(ms, pad);
5909 #if !NO_MALLOC_STATS
5910 void mspace_malloc_stats(mspace msp) {
5913 internal_malloc_stats(ms);
5921 size_t mspace_footprint(mspace msp) {
5933 size_t mspace_max_footprint(mspace msp) {
5945 size_t mspace_footprint_limit(mspace msp) {
5958 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
5977 struct mallinfo mspace_mallinfo(mspace msp) {
5982 return internal_mallinfo(ms);
5986 size_t mspace_usable_size(const void* mem) {
5995 int mspace_mallopt(int param_number, int value) {
5996 return change_mparam(param_number, value);