/*
  Default header file for malloc-2.8.x, written by Doug Lea
  and released to the public domain, as explained at
  http://creativecommons.org/publicdomain/zero/1.0/
*/

#ifndef MALLOC_280_H
#define MALLOC_280_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef STRUCT_MALLINFO_DECLARED
#define STRUCT_MALLINFO_DECLARED 1
/* Statistics snapshot returned by mallinfo(); see malloc_stats() for the
   printable equivalent.  Fields marked "always 0" exist only for
   compatibility with the SVID/XPG struct mallinfo layout. */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif /* _MALLOC_H */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void  dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  if n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void* dlrealloc(void*, size_t);

/*
  realloc_in_place(void* p, size_t n)
  Resizes the space allocated for p to size n, only if this can be
  done without moving p (i.e., only if there is adjacent space
  available if n is greater than p's current allocated size, or n is
  less than or equal to p's size). This may be used instead of plain
  realloc if an alternative allocation strategy is needed upon failure
  to expand space; for example, reallocation of a buffer that must be
  memory-aligned or cleared. You can use realloc_in_place to trigger
  these alternatives only when needed.

  Returns p if successful; otherwise null.
*/
void* dlrealloc_in_place(void*, size_t);

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);

/*
  int posix_memalign(void** pp, size_t alignment, size_t n);
  Allocates a chunk of n bytes, aligned in accord with the alignment
  argument. Differs from memalign only in that it (1) assigns the
  allocated memory to *pp rather than returning it, (2) fails and
  returns EINVAL if the alignment is not a power of two (3) fails and
  returns ENOMEM if memory cannot be allocated.
*/
int dlposix_memalign(void**, size_t, size_t);

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt:

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
int dlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)


/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
size_t dlmalloc_max_footprint(void);

/*
  malloc_footprint_limit();
  Returns the number of bytes that the heap is allowed to obtain from
  the system, returning the last value returned by
  malloc_set_footprint_limit, or the maximum size_t value if
  never set. The returned value reflects a permission. There is no
  guarantee that this number of bytes can actually be obtained from
  the system.
*/
size_t dlmalloc_footprint_limit(void);

/*
  malloc_set_footprint_limit();
  Sets the maximum number of bytes to obtain from the system, causing
  failure returns from malloc and related functions upon attempts to
  exceed this value. The argument value may be subject to page
  rounding to an enforceable limit; this actual value is returned.
  Using an argument of the maximum possible size_t effectively
  disables checks. If the argument is less than or equal to the
  current malloc_footprint, then all future allocations that require
  additional system memory will fail. However, invocation cannot
  retroactively deallocate existing used memory.
*/
size_t dlmalloc_set_footprint_limit(size_t bytes);

/*
  malloc_inspect_all(void(*handler)(void *start,
                                    void *end,
                                    size_t used_bytes,
                                    void* callback_arg),
                     void* arg);
  Traverses the heap and calls the given handler for each managed
  region, skipping all bytes that are (or may be) used for bookkeeping
  purposes.  Traversal does not include chunks that have been
  directly memory mapped. Each reported region begins at the start
  address, and continues up to but not including the end address.  The
  first used_bytes of the region contain allocated data. If
  used_bytes is zero, the region is unallocated. The handler is
  invoked with the given callback argument. If locks are defined, they
  are held during the entire traversal. It is a bad idea to invoke
  other malloc functions from within the handler.

  For example, to count the number of in-use chunks with size greater
  than 1000, you could write:
  static int count = 0;
  void count_chunks(void* start, void* end, size_t used, void* arg) {
    if (used >= 1000) ++count;
  }
  then:
  malloc_inspect_all(count_chunks, NULL);

  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
                          void* arg);

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.    It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... }
  struct Foot { ... }

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char* body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t, size_t*, void**);

/*
  bulk_free(void* array[], size_t n_elements)
  Frees and clears (sets to null) each non-null pointer in the given
  array.  This is likely to be faster than freeing them one-by-one.
  If footers are used, pointers that have been allocated in different
  mspaces are not freed or cleared, and the count of all such pointers
  is returned.  For large arrays of pointers with poor locality, it
  may be worthwhile to sort this array before calling bulk_free.
*/
size_t dlbulk_free(void**, size_t n_elements);

/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void* dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int dlmalloc_trim(size_t);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

  malloc_stats is not compiled if NO_MALLOC_STATS is defined.
*/
void  dlmalloc_stats(void);

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(const void*);

#ifdef __cplusplus
}  /* end of extern "C" */
#endif

#endif /* MALLOC_280_H */