Stackdb
Stackdb is a stackable, multi-target and -level source debugger and memory forensics library.
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
dlmalloc.h
Go to the documentation of this file.
1 /*
2  Default header file for malloc-2.8.x, written by Doug Lea
3  and released to the public domain, as explained at
4  http://creativecommons.org/publicdomain/zero/1.0/
5 
6  This header is for ANSI C/C++ only. You can set any of
7  the following #defines before including:
8 
9  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
10  was also compiled with this option, so all routines
11  have names starting with "dl".
12 
13  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
14  file will be #included AFTER <malloc.h>. This is needed only if
15  your system defines a struct mallinfo that is incompatible with the
16  standard one declared here. Otherwise, you can include this file
17  INSTEAD of your system <malloc.h>. At least on ANSI, all
18  declarations should be compatible with system versions
19 
20  * If MSPACES is defined, declarations for mspace versions are included.
21 */
22 
23 #ifndef MALLOC_280_H
24 #define MALLOC_280_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
30 /* The maximum possible size_t value has all bits set */
31 #define MAX_SIZE_T (~(size_t)0)
32 
33 /* MORECORE and MMAP must return MFAIL on failure */
34 #define MFAIL ((void*)(MAX_SIZE_T))
35 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
36 
37 #include <stddef.h> /* for size_t */
38 
39 #ifndef ONLY_MSPACES
40 #define ONLY_MSPACES 0 /* define to a value */
41 #elif ONLY_MSPACES != 0
42 #define ONLY_MSPACES 1
43 #endif /* ONLY_MSPACES */
44 #ifndef NO_MALLINFO
45 #define NO_MALLINFO 0
46 #endif /* NO_MALLINFO */
47 
48 #ifndef MSPACES
49 #if ONLY_MSPACES
50 #define MSPACES 1
51 #else /* ONLY_MSPACES */
52 #define MSPACES 0
53 #endif /* ONLY_MSPACES */
54 #endif /* MSPACES */
55 
56 #if !ONLY_MSPACES
57 
58 #ifndef USE_DL_PREFIX
59 #define dlcalloc calloc
60 #define dlfree free
61 #define dlmalloc malloc
62 #define dlmemalign memalign
63 #define dlposix_memalign posix_memalign
64 #define dlrealloc realloc
65 #define dlvalloc valloc
66 #define dlpvalloc pvalloc
67 #define dlmallinfo mallinfo
68 #define dlmallopt mallopt
69 #define dlmalloc_trim malloc_trim
70 #define dlmalloc_stats malloc_stats
71 #define dlmalloc_usable_size malloc_usable_size
72 #define dlmalloc_footprint malloc_footprint
73 #define dlmalloc_max_footprint malloc_max_footprint
74 #define dlmalloc_footprint_limit malloc_footprint_limit
75 #define dlmalloc_set_footprint_limit malloc_set_footprint_limit
76 #define dlmalloc_inspect_all malloc_inspect_all
77 #define dlindependent_calloc independent_calloc
78 #define dlindependent_comalloc independent_comalloc
79 #define dlbulk_free bulk_free
80 #endif /* USE_DL_PREFIX */
81 
82 #if !NO_MALLINFO
83 #ifndef HAVE_USR_INCLUDE_MALLOC_H
84 #ifndef _MALLOC_H
85 #ifndef MALLINFO_FIELD_TYPE
86 #define MALLINFO_FIELD_TYPE size_t
87 #endif /* MALLINFO_FIELD_TYPE */
88 #ifndef STRUCT_MALLINFO_DECLARED
89 #define STRUCT_MALLINFO_DECLARED 1
90 struct mallinfo {
91  MALLINFO_FIELD_TYPE arena; /* current total non-mmapped bytes allocated from system */
92  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
93  MALLINFO_FIELD_TYPE smblks; /* always 0 (unused by this malloc) */
94  MALLINFO_FIELD_TYPE hblks; /* always 0 (unused by this malloc) */
95  MALLINFO_FIELD_TYPE hblkhd; /* total bytes held in mmapped regions */
96  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space; may exceed current total if trimming has occurred */
97  MALLINFO_FIELD_TYPE fsmblks; /* always 0 (unused by this malloc) */
98  MALLINFO_FIELD_TYPE uordblks; /* current total allocated space (normal or mmapped) */
99  MALLINFO_FIELD_TYPE fordblks; /* total free space */
100  MALLINFO_FIELD_TYPE keepcost; /* bytes ideally releasable back to system via malloc_trim */
101 };
102 #endif /* STRUCT_MALLINFO_DECLARED */
103 #endif /* _MALLOC_H */
104 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
105 #endif /* !NO_MALLINFO */
106 
107 /*
108  malloc(size_t n)
109  Returns a pointer to a newly allocated chunk of at least n bytes, or
110  null if no space is available, in which case errno is set to ENOMEM
111  on ANSI C systems.
112 
113  If n is zero, malloc returns a minimum-sized chunk. (The minimum
114  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
115  systems.) Note that size_t is an unsigned type, so calls with
116  arguments that would be negative if signed are interpreted as
117  requests for huge amounts of space, which will often fail. The
118  maximum supported value of n differs across systems, but is in all
119  cases less than the maximum representable value of a size_t.
120 */
121 void* dlmalloc(size_t);
122 
123 /*
124  free(void* p)
125  Releases the chunk of memory pointed to by p, that had been previously
126  allocated using malloc or a related routine such as realloc.
127  It has no effect if p is null. If p was not malloced or already
128  freed, free(p) will by default cause the current program to abort.
129 */
130 void dlfree(void*);
131 
132 /*
133  calloc(size_t n_elements, size_t element_size);
134  Returns a pointer to n_elements * element_size bytes, with all locations
135  set to zero.
136 */
137 void* dlcalloc(size_t, size_t);
138 
139 /*
140  realloc(void* p, size_t n)
141  Returns a pointer to a chunk of size n that contains the same data
142  as does chunk p up to the minimum of (n, p's size) bytes, or null
143  if no space is available.
144 
145  The returned pointer may or may not be the same as p. The algorithm
146  prefers extending p in most cases when possible, otherwise it
147  employs the equivalent of a malloc-copy-free sequence.
148 
149  If p is null, realloc is equivalent to malloc.
150 
151  If space is not available, realloc returns null, errno is set (if on
152  ANSI) and p is NOT freed.
153 
154  if n is for fewer bytes than already held by p, the newly unused
155  space is lopped off and freed if possible. realloc with a size
156  argument of zero (re)allocates a minimum-sized chunk.
157 
158  The old unix realloc convention of allowing the last-free'd chunk
159  to be used as an argument to realloc is not supported.
160 */
161 void* dlrealloc(void*, size_t);
162 
163 /*
164  realloc_in_place(void* p, size_t n)
165  Resizes the space allocated for p to size n, only if this can be
166  done without moving p (i.e., only if there is adjacent space
167  available if n is greater than p's current allocated size, or n is
168  less than or equal to p's size). This may be used instead of plain
169  realloc if an alternative allocation strategy is needed upon failure
170  to expand space; for example, reallocation of a buffer that must be
171  memory-aligned or cleared. You can use realloc_in_place to trigger
172  these alternatives only when needed.
173 
174  Returns p if successful; otherwise null.
175 */
176 void* dlrealloc_in_place(void*, size_t);
177 
178 /*
179  memalign(size_t alignment, size_t n);
180  Returns a pointer to a newly allocated chunk of n bytes, aligned
181  in accord with the alignment argument.
182 
183  The alignment argument should be a power of two. If the argument is
184  not a power of two, the nearest greater power is used.
185  8-byte alignment is guaranteed by normal malloc calls, so don't
186  bother calling memalign with an argument of 8 or less.
187 
188  Overreliance on memalign is a sure way to fragment space.
189 */
190 void* dlmemalign(size_t, size_t);
191 
192 /*
193  int posix_memalign(void** pp, size_t alignment, size_t n);
194  Allocates a chunk of n bytes, aligned in accord with the alignment
195  argument. Differs from memalign only in that it (1) assigns the
196  allocated memory to *pp rather than returning it, (2) fails and
197  returns EINVAL if the alignment is not a power of two (3) fails and
198  returns ENOMEM if memory cannot be allocated.
199 */
200 int dlposix_memalign(void**, size_t, size_t);
201 
202 /*
203  valloc(size_t n);
204  Equivalent to memalign(pagesize, n), where pagesize is the page
205  size of the system. If the pagesize is unknown, 4096 is used.
206 */
207 void* dlvalloc(size_t);
208 
209 /*
210  mallopt(int parameter_number, int parameter_value)
211  Sets tunable parameters. The format is to provide a
212  (parameter-number, parameter-value) pair. mallopt then sets the
213  corresponding parameter to the argument value if it can (i.e., so
214  long as the value is meaningful), and returns 1 if successful else
215  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
216  normally defined in malloc.h. None of these are used in this malloc,
217  so setting them has no effect. But this malloc also supports other
218  options in mallopt:
219 
220  Symbol param # default allowed param values
221  M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
222  M_GRANULARITY -2 page size any power of 2 >= page size
223  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
224 */
225 int dlmallopt(int, int);
226 
227 #define M_TRIM_THRESHOLD (-1)
228 #define M_GRANULARITY (-2)
229 #define M_MMAP_THRESHOLD (-3)
230 
231 
232 /*
233  malloc_footprint();
234  Returns the number of bytes obtained from the system. The total
235  number of bytes allocated by malloc, realloc etc., is less than this
236  value. Unlike mallinfo, this function returns only a precomputed
237  result, so can be called frequently to monitor memory consumption.
238  Even if locks are otherwise defined, this function does not use them,
239  so results might not be up to date.
240 */
241 size_t dlmalloc_footprint(void);
242 
243 /*
244  malloc_max_footprint();
245  Returns the maximum number of bytes obtained from the system. This
246  value will be greater than current footprint if deallocated space
247  has been reclaimed by the system. The peak number of bytes allocated
248  by malloc, realloc etc., is less than this value. Unlike mallinfo,
249  this function returns only a precomputed result, so can be called
250  frequently to monitor memory consumption. Even if locks are
251  otherwise defined, this function does not use them, so results might
252  not be up to date.
253 */
254 size_t dlmalloc_max_footprint(void);
255 
256 /*
257  malloc_footprint_limit();
258  Returns the number of bytes that the heap is allowed to obtain from
259  the system, returning the last value returned by
260  malloc_set_footprint_limit, or the maximum size_t value if
261  never set. The returned value reflects a permission. There is no
262  guarantee that this number of bytes can actually be obtained from
263  the system.
264 */
265 size_t dlmalloc_footprint_limit(void);
266 
267 /*
268  malloc_set_footprint_limit();
269  Sets the maximum number of bytes to obtain from the system, causing
270  failure returns from malloc and related functions upon attempts to
271  exceed this value. The argument value may be subject to page
272  rounding to an enforceable limit; this actual value is returned.
273  Using an argument of the maximum possible size_t effectively
274  disables checks. If the argument is less than or equal to the
275  current malloc_footprint, then all future allocations that require
276  additional system memory will fail. However, invocation cannot
277  retroactively deallocate existing used memory.
278 */
279 size_t dlmalloc_set_footprint_limit(size_t bytes);
280 
281 /*
282  malloc_inspect_all(void(*handler)(void *start,
283  void *end,
284  size_t used_bytes,
285  void* callback_arg),
286  void* arg);
287  Traverses the heap and calls the given handler for each managed
288  region, skipping all bytes that are (or may be) used for bookkeeping
289  purposes. Traversal does not include chunks that have been
290  directly memory mapped. Each reported region begins at the start
291  address, and continues up to but not including the end address. The
292  first used_bytes of the region contain allocated data. If
293  used_bytes is zero, the region is unallocated. The handler is
294  invoked with the given callback argument. If locks are defined, they
295  are held during the entire traversal. It is a bad idea to invoke
296  other malloc functions from within the handler.
297 
298  For example, to count the number of in-use chunks with size of at
299  least 1000, you could write:
300  static int count = 0;
301  void count_chunks(void* start, void* end, size_t used, void* arg) {
302  if (used >= 1000) ++count;
303  }
304  then:
305  malloc_inspect_all(count_chunks, NULL);
306 
307  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
308 */
309 void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
310  void* arg);
311 
312 #if !NO_MALLINFO
313 /*
314  mallinfo()
315  Returns (by copy) a struct containing various summary statistics:
316 
317  arena: current total non-mmapped bytes allocated from system
318  ordblks: the number of free chunks
319  smblks: always zero.
320  hblks: current number of mmapped regions
321  hblkhd: total bytes held in mmapped regions
322  usmblks: the maximum total allocated space. This will be greater
323  than current total if trimming has occurred.
324  fsmblks: always zero
325  uordblks: current total allocated space (normal or mmapped)
326  fordblks: total free space
327  keepcost: the maximum number of bytes that could ideally be released
328  back to system via malloc_trim. ("ideally" means that
329  it ignores page restrictions etc.)
330 
331  Because these fields are ints, but internal bookkeeping may
332  be kept as longs, the reported values may wrap around zero and
333  thus be inaccurate.
334 */
335 
336 struct mallinfo dlmallinfo(void);
337 #endif /* NO_MALLINFO */
338 
339 /*
340  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
341 
342  independent_calloc is similar to calloc, but instead of returning a
343  single cleared space, it returns an array of pointers to n_elements
344  independent elements that can hold contents of size elem_size, each
345  of which starts out cleared, and can be independently freed,
346  realloc'ed etc. The elements are guaranteed to be adjacently
347  allocated (this is not guaranteed to occur with multiple callocs or
348  mallocs), which may also improve cache locality in some
349  applications.
350 
351  The "chunks" argument is optional (i.e., may be null, which is
352  probably the most typical usage). If it is null, the returned array
353  is itself dynamically allocated and should also be freed when it is
354  no longer needed. Otherwise, the chunks array must be of at least
355  n_elements in length. It is filled in with the pointers to the
356  chunks.
357 
358  In either case, independent_calloc returns this pointer array, or
359  null if the allocation failed. If n_elements is zero and "chunks"
360  is null, it returns a chunk representing an array with zero elements
361  (which should be freed if not wanted).
362 
363  Each element must be freed when it is no longer needed. This can be
364  done all at once using bulk_free.
365 
366  independent_calloc simplifies and speeds up implementations of many
367  kinds of pools. It may also be useful when constructing large data
368  structures that initially have a fixed number of fixed-sized nodes,
369  but the number is not known at compile time, and some of the nodes
370  may later need to be freed. For example:
371 
372  struct Node { int item; struct Node* next; };
373 
374  struct Node* build_list() {
375  struct Node** pool;
376  int n = read_number_of_nodes_needed();
377  if (n <= 0) return 0;
378  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
379  if (pool == 0) die();
380  // organize into a linked list...
381  struct Node* first = pool[0];
382  for (int i = 0; i < n-1; ++i)
383  pool[i]->next = pool[i+1];
384  free(pool); // Can now free the array (or not, if it is needed later)
385  return first;
386  }
387 */
388 void** dlindependent_calloc(size_t, size_t, void**);
389 
390 /*
391  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
392 
393  independent_comalloc allocates, all at once, a set of n_elements
394  chunks with sizes indicated in the "sizes" array. It returns
395  an array of pointers to these elements, each of which can be
396  independently freed, realloc'ed etc. The elements are guaranteed to
397  be adjacently allocated (this is not guaranteed to occur with
398  multiple callocs or mallocs), which may also improve cache locality
399  in some applications.
400 
401  The "chunks" argument is optional (i.e., may be null). If it is null
402  the returned array is itself dynamically allocated and should also
403  be freed when it is no longer needed. Otherwise, the chunks array
404  must be of at least n_elements in length. It is filled in with the
405  pointers to the chunks.
406 
407  In either case, independent_comalloc returns this pointer array, or
408  null if the allocation failed. If n_elements is zero and chunks is
409  null, it returns a chunk representing an array with zero elements
410  (which should be freed if not wanted).
411 
412  Each element must be freed when it is no longer needed. This can be
413  done all at once using bulk_free.
414 
415  independent_comalloc differs from independent_calloc in that each
416  element may have a different size, and also that it does not
417  automatically clear elements.
418 
419  independent_comalloc can be used to speed up allocation in cases
420  where several structs or objects must always be allocated at the
421  same time. For example:
422 
423  struct Head { ... }
424  struct Foot { ... }
425 
426  void send_message(char* msg) {
427  int msglen = strlen(msg);
428  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
429  void* chunks[3];
430  if (independent_comalloc(3, sizes, chunks) == 0)
431  die();
432  struct Head* head = (struct Head*)(chunks[0]);
433  char* body = (char*)(chunks[1]);
434  struct Foot* foot = (struct Foot*)(chunks[2]);
435  // ...
436  }
437 
438  In general though, independent_comalloc is worth using only for
439  larger values of n_elements. For small values, you probably won't
440  detect enough difference from series of malloc calls to bother.
441 
442  Overuse of independent_comalloc can increase overall memory usage,
443  since it cannot reuse existing noncontiguous small chunks that
444  might be available for some of the elements.
445 */
446 void** dlindependent_comalloc(size_t, size_t*, void**);
447 
448 /*
449  bulk_free(void* array[], size_t n_elements)
450  Frees and clears (sets to null) each non-null pointer in the given
451  array. This is likely to be faster than freeing them one-by-one.
452  If footers are used, pointers that have been allocated in different
453  mspaces are not freed or cleared, and the count of all such pointers
454  is returned. For large arrays of pointers with poor locality, it
455  may be worthwhile to sort this array before calling bulk_free.
456 */
457 size_t dlbulk_free(void**, size_t n_elements);
458 
459 /*
460  pvalloc(size_t n);
461  Equivalent to valloc(minimum-page-that-holds(n)), that is,
462  round up n to nearest pagesize.
463  */
464 void* dlpvalloc(size_t);
465 
466 /*
467  malloc_trim(size_t pad);
468 
469  If possible, gives memory back to the system (via negative arguments
470  to sbrk) if there is unused memory at the `high' end of the malloc
471  pool or in unused MMAP segments. You can call this after freeing
472  large blocks of memory to potentially reduce the system-level memory
473  requirements of a program. However, it cannot guarantee to reduce
474  memory. Under some allocation patterns, some large free blocks of
475  memory will be locked between two used chunks, so they cannot be
476  given back to the system.
477 
478  The `pad' argument to malloc_trim represents the amount of free
479  trailing space to leave untrimmed. If this argument is zero, only
480  the minimum amount of memory to maintain internal data structures
481  will be left. Non-zero arguments can be supplied to maintain enough
482  trailing space to service future expected allocations without having
483  to re-obtain memory from the system.
484 
485  Malloc_trim returns 1 if it actually released any memory, else 0.
486 */
487 int dlmalloc_trim(size_t);
488 
489 /*
490  malloc_stats();
491  Prints on stderr the amount of space obtained from the system (both
492  via sbrk and mmap), the maximum amount (which may be more than
493  current if malloc_trim and/or munmap got called), and the current
494  number of bytes allocated via malloc (or realloc, etc) but not yet
495  freed. Note that this is the number of bytes allocated, not the
496  number requested. It will be larger than the number requested
497  because of alignment and bookkeeping overhead. Because it includes
498  alignment wastage as being in use, this figure may be greater than
499  zero even when no user-level chunks are allocated.
500 
501  The reported current and maximum system memory can be inaccurate if
502  a program makes other calls to system memory allocation functions
503  (normally sbrk) outside of malloc.
504 
505  malloc_stats prints only the most commonly interesting statistics.
506  More information can be obtained by calling mallinfo.
507 
508  malloc_stats is not compiled if NO_MALLOC_STATS is defined.
509 */
510 void dlmalloc_stats(void);
511 
512 #endif /* !ONLY_MSPACES */
513 
514 /*
515  malloc_usable_size(void* p);
516 
517  Returns the number of bytes you can actually use in
518  an allocated chunk, which may be more than you requested (although
519  often not) due to alignment and minimum size constraints.
520  You can use this many bytes without worrying about
521  overwriting other allocated objects. This is not a particularly great
522  programming practice. malloc_usable_size can be more useful in
523  debugging and assertions, for example:
524 
525  p = malloc(n);
526  assert(malloc_usable_size(p) >= 256);
527 */
528 size_t dlmalloc_usable_size(void*);
529 
530 #if MSPACES
531 
532 /*
533  mspace is an opaque type representing an independent
534  region of space that supports mspace_malloc, etc.
535 */
536 typedef void* mspace;
537 
538 /*
539  create_mspace creates and returns a new independent space with the
540  given initial capacity, or, if 0, the default granularity size. It
541  returns null if there is no system memory available to create the
542  space. If argument locked is non-zero, the space uses a separate
543  lock to control access. The capacity of the space will grow
544  dynamically as needed to service mspace_malloc requests. You can
545  control the sizes of incremental increases of this space by
546  compiling with a different DEFAULT_GRANULARITY or dynamically
547  setting with mallopt(M_GRANULARITY, value).
548 */
549 mspace create_mspace(size_t capacity, int locked);
550 
551 /*
552  destroy_mspace destroys the given space, and attempts to return all
553  of its memory back to the system, returning the total number of
554  bytes freed. After destruction, the results of access to all memory
555  used by the space become undefined.
556 */
557 size_t destroy_mspace(mspace msp);
558 
559 /*
560  create_mspace_with_base uses the memory supplied as the initial base
561  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
562  space is used for bookkeeping, so the capacity must be at least this
563  large. (Otherwise 0 is returned.) When this initial space is
564  exhausted, additional memory will be obtained from the system.
565  Destroying this space will deallocate all additionally allocated
566  space (if possible) but not the initial base.
567 */
568 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
569 
570 /*
571  mspace_track_large_chunks controls whether requests for large chunks
572  are allocated in their own untracked mmapped regions, separate from
573  others in this mspace. By default large chunks are not tracked,
574  which reduces fragmentation. However, such chunks are not
575  necessarily released to the system upon destroy_mspace. Enabling
576  tracking by setting to true may increase fragmentation, but avoids
577  leakage when relying on destroy_mspace to release all memory
578  allocated using this space. The function returns the previous
579  setting.
580 */
581 int mspace_track_large_chunks(mspace msp, int enable);
582 
583 #if !NO_MALLINFO
584 /*
585  mspace_mallinfo behaves as mallinfo, but reports properties of
586  the given space.
587 */
588 struct mallinfo mspace_mallinfo(mspace msp);
589 #endif /* NO_MALLINFO */
590 
591 /*
592  An alias for mallopt.
593 */
594 int mspace_mallopt(int, int);
595 
596 /*
597  The following operate identically to their malloc counterparts
598  but operate only for the given mspace argument
599 */
600 void* mspace_malloc(mspace msp, size_t bytes);
601 void mspace_free(mspace msp, void* mem);
602 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
603 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
604 void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize);
605 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
606 int mspace_posix_memalign(mspace msp, void** pp, size_t alignment, size_t bytes);
607 void** mspace_independent_calloc(mspace msp, size_t n_elements,
608  size_t elem_size, void* chunks[]);
609 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
610  size_t sizes[], void* chunks[]);
611 size_t mspace_bulk_free(mspace msp, void**, size_t n_elements);
612 size_t mspace_usable_size(const void* mem);
613 void mspace_malloc_stats(mspace msp);
614 int mspace_trim(mspace msp, size_t pad);
615 size_t mspace_footprint(mspace msp);
616 size_t mspace_max_footprint(mspace msp);
617 size_t mspace_footprint_limit(mspace msp);
618 size_t mspace_set_footprint_limit(mspace msp, size_t bytes);
619 void mspace_inspect_all(mspace msp,
620  void(*handler)(void *, void *, size_t, void*),
621  void* arg);
622 #endif /* MSPACES */
623 
624 #ifdef __cplusplus
625 }; /* end of extern "C" */
626 #endif
627 
628 #endif /* MALLOC_280_H */
#define dlmalloc_usable_size
Definition: dlmalloc.h:71
#define dlindependent_comalloc
Definition: dlmalloc.h:78
MALLINFO_FIELD_TYPE arena
Definition: dlmalloc.c:764
MALLINFO_FIELD_TYPE hblks
Definition: dlmalloc.c:767
#define dlmalloc_stats
Definition: dlmalloc.h:70
#define dlmalloc_set_footprint_limit
Definition: dlmalloc.h:75
#define dlindependent_calloc
Definition: dlmalloc.h:77
MALLINFO_FIELD_TYPE ordblks
Definition: dlmalloc.c:765
#define dlmalloc_inspect_all
Definition: dlmalloc.h:76
#define dlcalloc
Definition: dlmalloc.h:59
#define dlfree
Definition: dlmalloc.h:60
#define dlvalloc
Definition: dlmalloc.h:65
#define dlmalloc_trim
Definition: dlmalloc.h:69
#define dlmalloc
Definition: dlmalloc.h:61
#define dlmallinfo
Definition: dlmalloc.h:67
#define dlposix_memalign
Definition: dlmalloc.h:63
MALLINFO_FIELD_TYPE fordblks
Definition: dlmalloc.c:772
#define dlmalloc_footprint
Definition: dlmalloc.h:72
#define dlbulk_free
Definition: dlmalloc.h:79
#define dlmalloc_footprint_limit
Definition: dlmalloc.h:74
#define dlmemalign
Definition: dlmalloc.h:62
MALLINFO_FIELD_TYPE fsmblks
Definition: dlmalloc.c:770
MALLINFO_FIELD_TYPE keepcost
Definition: dlmalloc.c:773
MALLINFO_FIELD_TYPE hblkhd
Definition: dlmalloc.c:768
#define dlpvalloc
Definition: dlmalloc.h:66
#define MALLINFO_FIELD_TYPE
Definition: dlmalloc.h:86
#define dlmallopt
Definition: dlmalloc.h:68
void * dlrealloc_in_place(void *, size_t)
Definition: dlmalloc.c:5232
#define dlmalloc_max_footprint
Definition: dlmalloc.h:73
#define dlrealloc
Definition: dlmalloc.h:64
MALLINFO_FIELD_TYPE uordblks
Definition: dlmalloc.c:771
MALLINFO_FIELD_TYPE smblks
Definition: dlmalloc.c:766
MALLINFO_FIELD_TYPE usmblks
Definition: dlmalloc.c:769