/*	$OpenBSD: malloc.c,v 1.275 2022/10/14 04:38:39 deraadt Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <uvm/uvmexp.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#ifdef MALLOC_STATS
#define MALLOC_INITIAL_REGIONS	512
#else
#define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
#endif
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
				    ((sz) - mopts.malloc_guard)) &	\
				    ~(MALLOC_MINSIZE - 1)))

#define PAGEROUND(x)		(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
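
/*
 * Example, assuming 4 KB pages, MALLOC_LEEWAY 0 and no guard page:
 * MALLOC_MOVE_COND(100) holds and MALLOC_MOVE(p, 100) yields
 * p + ((4096 - 100) & ~15) = p + 3984, so the 100-byte allocation is
 * shifted towards the end of its page while staying aligned to
 * MALLOC_MINSIZE (16 bytes).
 */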

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */
#define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL

#define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

/*
 * Two caches, one for "small" regions, one for "big".
 * Small cache is an array per size, big cache is one array with different
 * sized regions
 */
#define MAX_SMALLCACHEABLE_SIZE	32
#define MAX_BIGCACHEABLE_SIZE	512
/* If the total # of pages is larger than this, evict before inserting */
#define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)

struct smallcache {
	void **pages;
	ushort length;
	ushort max;
};

struct bigcache {
	void *page;
	size_t psize;
};

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int malloc_junk;		/* junk fill? */
	int mmap_flag;			/* extra flag for mmap */
	int mutex;
	/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
	/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	u_char rbytes[32];		/* random bytes */
	/* free pages cache */
	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
	size_t bigcache_used;
	size_t bigcache_size;
	struct bigcache *bigcache;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
	size_t pool_searches;		/* searches for pool */
	size_t other_pool;		/* searches in other pool */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			    ~MALLOC_PAGEMASK)

static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	u_short canary;
	u_short size;			/* size of this page's chunks */
	u_short shift;			/* how far to shift for this size */
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
	u_short bits[1];		/* which chunks are free */
};

struct malloc_readonly {
	/* Main bookkeeping information */
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
	u_int malloc_mutexes;		/* how much in actual use? */
	int malloc_mt;			/* multi-threaded mode? */
	int malloc_freecheck;		/* Extensive double free check */
	int malloc_freeunmap;		/* mprotect free pages PROT_NONE? */
	int def_malloc_junk;		/* junk fill? */
	int malloc_realloc;		/* always realloc? */
	int malloc_xmalloc;		/* xmalloc behaviour? */
	u_int chunk_canaries;		/* use canaries after chunks? */
	int internal_funcs;		/* use better recallocarray/freezero? */
	u_int def_maxcache;		/* free pages we cache */
	size_t malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
    __attribute__((section(".openbsd.mutable")));
#define mopts	malloc_readonly.mopts

char *malloc_options;	/* compile-time options */

static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
void malloc_gdump(int);
PROTO_NORMAL(malloc_gdump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/* low bits of r->p determine size: 0 means >= page size and r->size holding
 * real size, otherwise low bits are a shift count, or 1 for malloc(0)
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}
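
/*
 * Note on hash() below: (sum << 7) - sum is sum * 127, and each step
 * folds in another 16-bit slice of the page number, so on LP64 every
 * address bit above MALLOC_PAGESHIFT contributes to the table index.
 */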
static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}

static inline struct dir_info *
getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[1];
	else	/* first one reserved for special pool */
		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
		    (mopts.malloc_mutexes - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	int saved_errno = errno;
	va_list ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_gdump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '+':
		mopts.malloc_mutexes <<= 1;
		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
			mopts.malloc_mutexes = _MALLOC_MUTEXES;
		break;
	case '-':
		mopts.malloc_mutexes >>= 1;
		if (mopts.malloc_mutexes < 2)
			mopts.malloc_mutexes = 2;
		break;
	case '>':
		mopts.def_maxcache <<= 1;
		if (mopts.def_maxcache > MALLOC_MAXCACHE)
			mopts.def_maxcache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.def_maxcache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.def_malloc_junk > 0)
			mopts.def_malloc_junk--;
		break;
	case 'J':
		if (mopts.def_malloc_junk < 2)
			mopts.def_malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default:
		dprintf(STDERR_FILENO, "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n");
		break;
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[16];
	int i, j;
	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
	size_t sb;

	/*
	 * Default options
	 */
	mopts.malloc_mutexes = 8;
	mopts.def_malloc_junk = 1;
	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			sb = sizeof(b);
			j = sysctl(mib, 2, b, &sb, NULL, 0);
			if (j != 0)
				continue;
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = 0;
				break;
			case 's':
				for (q = "cfgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n");
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
	if (mopts.chunk_canaries)
		do {
			mopts.chunk_canaries = arc4random();
		} while ((u_char)mopts.chunk_canaries == 0 ||
		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
}

static void
omalloc_poolinit(struct dir_info **dp, int mmap_flag)
{
	char *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
	    MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size, mmap_flag);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
	d->mmap_flag = mmap_flag;
	d->malloc_junk = mopts.def_malloc_junk;
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i, oldpsz;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
	mask = newtotal - 1;

	/* Don't use cache here, we don't want user uaf touch this */
	p = MMAP(newsize, d->mmap_flag);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}

	oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
	/* clear to avoid meta info ending up in the cache */
	unmap(d, d->r, oldpsz, oldpsz);
	d->regions_free += d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}
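
/*
 * The region table is kept at most 75% full: insert() below grows it
 * via omalloc_grow() once free slots drop under a quarter of the total
 * (regions_free * 4 < regions_total). Collision probing steps
 * downwards ((index - 1) & mask), which pairs with the Knuth
 * algorithm R deletion in delete().
 */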

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}

	}
}

static inline void
junk_free(int junk, void *p, size_t sz)
{
	size_t i, step = 1;
	uint64_t *lp = p;

	if (junk == 0 || sz == 0)
		return;
	sz /= sizeof(uint64_t);
	if (junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	for (i = 0; i < sz; i += step)
		lp[i] = SOME_FREEJUNK_ULL;
}

static inline void
validate_junk(struct dir_info *pool, void *p, size_t sz)
{
	size_t i, step = 1;
	uint64_t *lp = p;

	if (pool->malloc_junk == 0 || sz == 0)
		return;
	sz /= sizeof(uint64_t);
	if (pool->malloc_junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	for (i = 0; i < sz; i += step) {
		if (lp[i] != SOME_FREEJUNK_ULL)
			wrterror(pool, "write after free %p", p);
	}
}
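
/*
 * Junk levels as used by junk_free()/validate_junk() above: level 1
 * touches at most one page of a region and only samples every sz/4
 * 64-bit words; level 2 writes and verifies every word with
 * SOME_FREEJUNK_ULL; level 0 does nothing.
 */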

/*
 * Cache maintenance.
 * Opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	void *r;
	u_short i;
	struct smallcache *cache;

	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "munmap round");

	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		u_short base = getrbyte(d);
		u_short j;

		/* don't look through all slots */
		for (j = 0; j < d->bigcache_size / 4; j++) {
			i = (base + j) % d->bigcache_size;
			if (d->bigcache_used <
			    BIGCACHE_FILL(d->bigcache_size)) {
				if (d->bigcache[i].psize == 0)
					break;
			} else {
				if (d->bigcache[i].psize != 0)
					break;
			}
		}
		/* if we didn't find a preferred slot, use random one */
		if (d->bigcache[i].psize != 0) {
			size_t tmp;

			r = d->bigcache[i].page;
			d->bigcache_used -= d->bigcache[i].psize;
			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
			if (!mopts.malloc_freeunmap)
				validate_junk(d, r, tmp);
			if (munmap(r, tmp))
				wrterror(d, "munmap %p", r);
			STATS_SUB(d->malloc_used, tmp);
		}

		if (clear > 0)
			explicit_bzero(p, clear);
		if (mopts.malloc_freeunmap) {
			if (mprotect(p, sz, PROT_NONE))
				wrterror(d, "mprotect %p", r);
		} else
			junk_free(d->malloc_junk, p, sz);
		d->bigcache[i].page = p;
		d->bigcache[i].psize = psz;
		d->bigcache_used += psz;
		return;
	}
	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
		if (munmap(p, sz))
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	cache = &d->smallcache[psz - 1];
	if (cache->length == cache->max) {
		/* use a random slot */
		i = getrbyte(d) % cache->max;
		r = cache->pages[i];
		if (!mopts.malloc_freeunmap)
			validate_junk(d, r, sz);
		if (munmap(r, sz))
			wrterror(d, "munmap %p", r);
		STATS_SUB(d->malloc_used, sz);
		cache->length--;
	} else
		i = cache->length;

	/* fill slot */
	if (clear > 0)
		explicit_bzero(p, clear);
	if (mopts.malloc_freeunmap)
		mprotect(p, sz, PROT_NONE);
	else
		junk_free(d->malloc_junk, p, sz);
	cache->pages[i] = p;
	cache->length++;
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	u_short i;
	void *p;
	struct smallcache *cache;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "map round");

	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		size_t base = getrbyte(d);
		size_t cached = d->bigcache_used;
		ushort j;

		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
			i = (j + base) % d->bigcache_size;
			if (d->bigcache[i].psize == psz) {
				p = d->bigcache[i].page;
				d->bigcache_used -= psz;
				d->bigcache[i].page = NULL;
				d->bigcache[i].psize = 0;

				if (!mopts.malloc_freeunmap)
					validate_junk(d, p, sz);
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_freeunmap)
					junk_free(d->malloc_junk, p, sz);
				return p;
			}
			cached -= d->bigcache[i].psize;
		}
	}
	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
		cache = &d->smallcache[psz - 1];
		if (cache->length > 0) {
			if (cache->length == 1)
				p = cache->pages[--cache->length];
			else {
				i = getrbyte(d) % cache->length;
				p = cache->pages[i];
				cache->pages[i] = cache->pages[--cache->length];
			}
			if (!mopts.malloc_freeunmap)
				validate_junk(d, p, sz);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_READ | PROT_WRITE);
			if (zero_fill)
				memset(p, 0, sz);
			else if (mopts.malloc_freeunmap)
				junk_free(d->malloc_junk, p, sz);
			return p;
		}
		if (psz <= 1) {
			_MALLOC_LEAVE(d);
			p = MMAP(cache->max * sz, d->mmap_flag);
			_MALLOC_ENTER(d);
			if (p != MAP_FAILED) {
				STATS_ADD(d->malloc_used, cache->max * sz);
				cache->length = cache->max - 1;
				for (i = 0; i < cache->max - 1; i++) {
					void *q = (char*)p + i * sz;
					cache->pages[i] = q;
					if (!mopts.malloc_freeunmap)
						junk_free(d->malloc_junk, q,
						    sz);
				}
				if (mopts.malloc_freeunmap)
					mprotect(p, (cache->max - 1) * sz,
					    PROT_NONE);
				p = (char*)p + (cache->max - 1) * sz;
				/* zero fill not needed */
				return p;
			}
		}

	}
	_MALLOC_LEAVE(d);
	p = MMAP(sz, d->mmap_flag);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
init_chunk_info(struct dir_info *d, struct chunk_info *p, int bits)
{
	int i;

	if (bits == 0) {
		p->shift = MALLOC_MINSHIFT;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 0;
		p->offset = 0xdead;
	} else {
		p->shift = bits;
		p->total = p->free = MALLOC_PAGESIZE >> p->shift;
		p->size = 1U << bits;
		p->offset = howmany(p->total, MALLOC_BITS);
	}
	p->canary = (u_short)d->canary1;

	/* set all valid bits in the bitmap */
	i = p->total - 1;
	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		size_t size, count, i;
		char *q;

		if (bits == 0)
			count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
		else
			count = MALLOC_PAGESIZE >> bits;

		size = howmany(count, MALLOC_BITS);
		size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
		if (mopts.chunk_canaries)
			size += count * sizeof(u_short);
		size = _ALIGN(size);

		/* Don't use cache here, we don't want user uaf touch this */
		q = MMAP(MALLOC_PAGESIZE, d->mmap_flag);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;

		for (i = 0; i < count; i++, q += size) {
			p = (struct chunk_info *)q;
			LIST_INSERT_HEAD(&d->chunk_info_list[bits], p, entries);
		}
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	if (p->shift == 0)
		init_chunk_info(d, p, bits);
	return p;
}
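
/*
 * chunk_info layout: bits[] is a bitmap with one bit per chunk on the
 * page (1 = free). With chunk canaries enabled, the u_shorts starting
 * at bits[offset] also record each chunk's requested size, so the
 * canary bytes placed behind the requested size can be checked on free.
 */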

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
		goto err;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL)
		goto err;
	bp->page = pp;

	if (insert(d, (void *)((uintptr_t)pp | (bits + 1)), (uintptr_t)bp,
	    NULL))
		goto err;
	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
	return bp;

err:
	unmap(d, pp, MALLOC_PAGESIZE, 0);
	return NULL;
}

static int
find_chunksize(size_t size)
{
	int r;

	/* malloc(0) is special */
	if (size == 0)
		return 0;

	if (size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;
	size--;

	r = MALLOC_MINSHIFT;
	while (size >> r)
		r++;
	return r;
}

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, mopts.chunk_canaries, check_sz);
}

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	u_int i, r;
	int j, listnum;
	size_t k;
	u_short *lp;
	struct chunk_info *bp;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	j = find_chunksize(size);

	r = ((u_int)getrbyte(d) << 8) | getrbyte(d);
	listnum = r % MALLOC_CHUNK_LISTS;
	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	i = (r / MALLOC_CHUNK_LISTS) & (bp->total - 1);

	/* start somewhere in a short */
	lp = &bp->bits[i / MALLOC_BITS];
	if (*lp) {
		j = i % MALLOC_BITS;
		k = ffs(*lp >> j);
		if (k != 0) {
			k += j - 1;
			goto found;
		}
	}
	/* no bit halfway, go to next full short */
	i /= MALLOC_BITS;
	for (;;) {
		if (++i >= bp->total / MALLOC_BITS)
			i = 0;
		lp = &bp->bits[i];
		if (*lp) {
			k = ffs(*lp) - 1;
			break;
		}
	}
found:
#ifdef MALLOC_STATS
	if (i == 0 && k == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= 1 << k;

	/* If there are no more free, remove from free-list */
	if (--bp->free == 0)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	p = (char *)bp->page + k;
	if (bp->size > 0) {
		if (d->malloc_junk == 2)
			memset(p, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries)
			fill_canary(p, size, bp->size);
	}
	return p;
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
			    ptr, p - ptr, sz,
			    *p == SOME_FREEJUNK ? " (double free?)" : "");
		}
		p++;
	}
}

static uint32_t
find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
{
	uint32_t chunknum;

	if (info->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, info, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}
static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (pool->malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* fill zeros if needed and overwritten above */
			if (zero_fill && pool->malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (pool->malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz -
					    mopts.malloc_guard, SOME_JUNK,
					    psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			} else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion. Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	u_int i, j, nmutexes;
	struct dir_info *d;

	_MALLOC_LOCK(1);
	if (!from_rthreads && mopts.malloc_pool[1]) {
		_MALLOC_UNLOCK(1);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ | PROT_WRITE);
	for (i = 0; i < nmutexes; i++) {
		if (mopts.malloc_pool[i])
			continue;
		if (i == 0) {
			omalloc_poolinit(&d, MAP_CONCEAL);
			d->malloc_junk = 2;
			d->bigcache_size = 0;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
				d->smallcache[j].max = 0;
		} else {
			size_t sz = 0;

			omalloc_poolinit(&d, 0);
			d->malloc_junk = mopts.def_malloc_junk;
			d->bigcache_size = mopts.def_maxcache;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
				d->smallcache[j].max =
				    mopts.def_maxcache >> (j / 8);
				sz += d->smallcache[j].max * sizeof(void *);
			}
			sz += d->bigcache_size * sizeof(struct bigcache);
			if (sz > 0) {
				void *p = MMAP(sz, 0);
				if (p == MAP_FAILED)
					wrterror(NULL,
					    "malloc init mmap failed");
				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
					d->smallcache[j].pages = p;
					p = (char *)p + d->smallcache[j].max *
					    sizeof(void *);
				}
				d->bigcache = p;
			}
		}
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;
	else
		mopts.internal_funcs = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(1);
}
DEF_STRONG(_malloc_init);

#define PROLOGUE(p, fn)			\
	d = (p); 			\
	if (d == NULL) {		\
		_malloc_init(0);	\
		d = (p);		\
	}				\
	_MALLOC_LOCK(d->mutex);		\
	d->func = fn;			\
	if (d->active++) {		\
		malloc_recurse(d);	\
		return NULL;		\
	}				\

#define EPILOGUE()				\
	d->active--;				\
	_MALLOC_UNLOCK(d->mutex);		\
	if (r == NULL && mopts.malloc_xmalloc)	\
		wrterror(d, "out of memory");	\
	if (r != NULL)				\
		errno = saved_errno;		\

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(getpool(), "malloc")
	r = omalloc(d, size, 0, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(malloc);*/

void *
malloc_conceal(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
	r = omalloc(d, size, 0, CALLER);
	EPILOGUE()
	return r;
}
DEF_WEAK(malloc_conceal);

static struct region_info *
findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
    char **saved_function)
{
	struct dir_info *pool = argpool;
	struct region_info *r = find(pool, p);

	STATS_INC(pool->pool_searches);
	if (r == NULL) {
		u_int i, nmutexes;

		nmutexes = mopts.malloc_mt ? mopts.malloc_mutexes : 2;
		STATS_INC(pool->other_pool);
		for (i = 1; i < nmutexes; i++) {
			u_int j = (argpool->mutex + i) & (nmutexes - 1);

			pool->active--;
			_MALLOC_UNLOCK(pool->mutex);
			pool = mopts.malloc_pool[j];
			_MALLOC_LOCK(pool->mutex);
			pool->active++;
			r = find(pool, p);
			if (r != NULL) {
				*saved_function = pool->func;
				pool->func = argpool->func;
				break;
			}
		}
		if (r == NULL)
			wrterror(argpool, "bogus pointer (double free?) %p", p);
	}
	*foundpool = pool;
	return r;
}

static void
ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
{
	struct region_info *r;
	struct dir_info *pool;
	char *saved_function;
	size_t sz;

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(sz, r);
	if (pool->mmap_flag) {
		clear = 1;
		if (!check) {
			argsz = sz;
			if (sz > MALLOC_MAXCHUNK)
				argsz -= mopts.malloc_guard;
		}
	}
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, info, p, 0);

				if (info->bits[info->offset + chunknum] < argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
		delete(pool, r);
	} else {
		/* Validate and optionally canary check */
		struct chunk_info *info = (struct chunk_info *)r->size;
		if (info->size != sz)
			wrterror(pool, "internal struct corrupt");
		find_chunknum(pool, info, p, mopts.chunk_canaries);
		if (!clear) {
			void *tmp;
			int i;

			if (mopts.malloc_freecheck) {
				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
					if (p == pool->delayed_chunks[i])
						wrterror(pool,
						    "double free %p", p);
			}
			junk_free(pool->malloc_junk, p, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			pool->delayed_chunks[i] = tmp;
			if (p != NULL) {
				r = find(pool, p);
				REALSIZE(sz, r);
				if (r != NULL)
					validate_junk(pool, p, sz);
			}
		} else if (argsz > 0) {
			r = find(pool, p);
			explicit_bzero(p, argsz);
		}
		if (p != NULL) {
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (*argpool != pool) {
		pool->func = saved_function;
		*argpool = pool;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(&d, ptr, 0, 0, 0);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/

static void
freezero_p(void *ptr, size_t sz)
{
	explicit_bzero(ptr, sz);
	free(ptr);
}

void
freezero(void *ptr, size_t sz)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	if (!mopts.internal_funcs) {
		freezero_p(ptr, sz);
		return;
	}

	d = getpool();
	if (d == NULL)
		wrterror(d, "freezero() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "freezero";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(&d, ptr, 1, 1, sz);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
DEF_WEAK(freezero);

static void *
orealloc(struct dir_info **argpool, void *p, size_t newsz, void *f)
{
	struct region_info *r;
	struct dir_info *pool;
	char *saved_function;
	struct chunk_info *info;
	size_t oldsz, goldsz, gnewsz;
	void *q, *ret;
	uint32_t chunknum;
	int forced;

	if (p == NULL)
		return omalloc(*argpool, newsz, 0, f);

	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(oldsz, r);
	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
		info = (struct chunk_info *)r->size;
		chunknum = find_chunknum(pool, info, p, 0);
	}

	goldsz = oldsz;
	if (oldsz > MALLOC_MAXCHUNK) {
		if (oldsz < mopts.malloc_guard)
			wrterror(pool, "guard size");
		oldsz -= mopts.malloc_guard;
	}

	gnewsz = newsz;
	if (gnewsz > MALLOC_MAXCHUNK)
		gnewsz += mopts.malloc_guard;

	forced = mopts.malloc_realloc || pool->mmap_flag;
	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
		/* First case: from n pages sized allocation to m pages sized
		   allocation, m > n */
		size_t roldsz = PAGEROUND(goldsz);
		size_t rnewsz = PAGEROUND(gnewsz);

		if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
		    roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
		    !mopts.malloc_guard) {

			ret = p;
			goto done;
		}

		if (rnewsz > roldsz) {
			/* try to extend existing region */
			if (!mopts.malloc_guard) {
				void *hint = (char *)r->p + roldsz;
				size_t needed = rnewsz - roldsz;

				STATS_INC(pool->cheap_realloc_tries);
				q = MMAPA(hint, needed, MAP_FIXED |
				    __MAP_NOREPLACE | pool->mmap_flag);
				if (q == hint) {
					STATS_ADD(pool->malloc_used, needed);
					if (pool->malloc_junk == 2)
						memset(q, SOME_JUNK, needed);
					r->size = gnewsz;
					if (r->p != p) {
						/* old pointer is moved */
						memmove(r->p, p, oldsz);
						p = r->p;
					}
					if (mopts.chunk_canaries)
						fill_canary(p, newsz,
						    PAGEROUND(newsz));
					STATS_SETF(r, f);
					STATS_INC(pool->cheap_reallocs);
					ret = p;
					goto done;
				}
			}
		} else if (rnewsz < roldsz) {
			/* shrink number of pages */
			if (mopts.malloc_guard) {
				if (mprotect((char *)r->p + rnewsz -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_NONE))
					wrterror(pool, "mprotect");
			}
			if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
				wrterror(pool, "munmap %p", (char *)r->p +
				    rnewsz);
			STATS_SUB(pool->malloc_used, roldsz - rnewsz);
			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz)) {
				void *pp = MALLOC_MOVE(r->p, gnewsz);
				memmove(pp, p, newsz);
				p = pp;
			} else if (mopts.chunk_canaries)
				fill_canary(p, newsz, PAGEROUND(newsz));
			STATS_SETF(r, f);
			ret = p;
			goto done;
		} else {
			/* number of pages remains the same */
			void *pp = r->p;

			r->size = gnewsz;
			if (MALLOC_MOVE_COND(gnewsz))
				pp = MALLOC_MOVE(r->p, gnewsz);
			if (p != pp) {
				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
				p = pp;
			}
			if (p == r->p) {
				if (newsz > oldsz && pool->malloc_junk == 2)
					memset((char *)p + newsz, SOME_JUNK,
					    rnewsz - mopts.malloc_guard -
					    newsz);
				if (mopts.chunk_canaries)
					fill_canary(p, newsz, PAGEROUND(newsz));
			}
			STATS_SETF(r, f);
			ret = p;
			goto done;
		}
	}
	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
	    1 << find_chunksize(newsz) == oldsz && !forced) {
		/* do not reallocate if new size fits good in existing chunk */
		if (pool->malloc_junk == 2)
			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
		if (mopts.chunk_canaries) {
			info->bits[info->offset + chunknum] = newsz;
			fill_canary(p, newsz, info->size);
		}
		STATS_SETF(r, f);
		ret = p;
	} else if (newsz != oldsz || forced) {
		/* create new allocation */
		q = omalloc(pool, newsz, 0, f);
		if (q == NULL) {
			ret = NULL;
			goto done;
		}
		if (newsz != 0 && oldsz != 0)
			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
		ofree(&pool, p, 0, 0, 0);
		ret = q;
	} else {
		/* oldsz == newsz */
		if (newsz != 0)
			wrterror(pool, "realloc internal inconsistency");
		STATS_SETF(r, f);
		ret = p;
	}
done:
	if (*argpool != pool) {
		pool->func = saved_function;
		*argpool = pool;
	}
	return ret;
}

void *
realloc(void *ptr, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	PROLOGUE(getpool(), "realloc")
	r = orealloc(&d, ptr, size, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(realloc);*/

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
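
/*
 * Example: with a 64-bit size_t, MUL_NO_OVERFLOW is 2^32, so a product
 * of two factors each below 2^32 cannot overflow; the callers below
 * only pay for the SIZE_MAX / nmemb division when a factor is at least
 * MUL_NO_OVERFLOW.
 */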

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	PROLOGUE(getpool(), "calloc")
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(calloc);*/

void *
calloc_conceal(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);
	EPILOGUE()
	return r;
}
DEF_WEAK(calloc_conceal);

static void *
orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
    size_t newsize, void *f)
{
	struct region_info *r;
	struct dir_info *pool;
	char *saved_function;
	void *newptr;
	size_t sz;

	if (p == NULL)
		return omalloc(*argpool, newsize, 1, f);

	if (oldsize == newsize)
		return p;

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(sz, r);
	if (sz <= MALLOC_MAXCHUNK) {
		if (mopts.chunk_canaries && sz > 0) {
			struct chunk_info *info = (struct chunk_info *)r->size;
			uint32_t chunknum = find_chunknum(pool, info, p, 0);

			if (info->bits[info->offset + chunknum] != oldsize)
				wrterror(pool, "recorded old size %hu != %zu",
				    info->bits[info->offset + chunknum],
				    oldsize);
		}
	} else if (oldsize < (sz - mopts.malloc_guard) / 2)
		wrterror(pool, "recorded old size %zu != %zu",
		    sz - mopts.malloc_guard, oldsize);

	newptr = omalloc(pool, newsize, 0, f);
	if (newptr == NULL)
		goto done;

	if (newsize > oldsize) {
		memcpy(newptr, p, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, p, newsize);

	ofree(&pool, p, 1, 0, oldsize);

done:
	if (*argpool != pool) {
		pool->func = saved_function;
		*argpool = pool;
	}

	return newptr;
}

static void *
recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	size_t oldsize, newsize;
	void *newptr;

	if (ptr == NULL)
		return calloc(newnmemb, size);

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
		errno = EINVAL;
		return NULL;
	}
	oldsize = oldnmemb * size;

	/*
	 * Don't bother too much if we're shrinking just a bit,
	 * we do not shrink for series of small steps, oh well.
	 */
	if (newsize <= oldsize) {
		size_t d = oldsize - newsize;

		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
			memset((char *)ptr + newsize, 0, d);
			return ptr;
		}
	}

	newptr = malloc(newsize);
	if (newptr == NULL)
		return NULL;

	if (newsize > oldsize) {
		memcpy(newptr, ptr, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, ptr, newsize);

	explicit_bzero(ptr, oldsize);
	free(ptr);

	return newptr;
}

void *
recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	struct dir_info *d;
	size_t oldsize = 0, newsize;
	void *r;
	int saved_errno = errno;

	if (!mopts.internal_funcs)
		return recallocarray_p(ptr, oldnmemb, newnmemb, size);

	PROLOGUE(getpool(), "recallocarray")

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if (ptr != NULL) {
		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
			d->active--;
			_MALLOC_UNLOCK(d->mutex);
			errno = EINVAL;
			return NULL;
		}
		oldsize = oldnmemb * size;
	}

	r = orecallocarray(&d, ptr, oldsize, newsize, CALLER);
	EPILOGUE()
	return r;
}
DEF_WEAK(recallocarray);

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round");

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of size bytes that is properly aligned. Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap %p", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap %p", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
    void *f)
{
	size_t psz;
	void *p;

	/* If between half a page and a page, avoid MALLOC_MOVE. */
	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
		sz = MALLOC_PAGESIZE;
	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	if (sz < MALLOC_PAGESIZE)
		sz = MALLOC_PAGESIZE;
	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == MAP_FAILED) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz, 0);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect");
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (pool->malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	} else if (mopts.chunk_canaries)
		fill_canary(p, sz - mopts.malloc_guard,
		    psz - mopts.malloc_guard);

	return p;
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/

void *
aligned_alloc(size_t alignment, size_t size)
{
	struct dir_info *d;
	int saved_errno = errno;
	void *r;

	/* Make sure that alignment is a positive power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment == 0) {
		errno = EINVAL;
		return NULL;
	};
	/* Per spec, size should be a multiple of alignment */
	if ((size & (alignment - 1)) != 0) {
		errno = EINVAL;
		return NULL;
	}

	PROLOGUE(getpool(), "aligned_alloc")
	r = omemalign(d, alignment, size, 0, CALLER);
	EPILOGUE()
	return r;
}
/*DEF_STRONG(aligned_alloc);*/
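
/*
 * The MALLOC_STATS code below keeps leak records in a red-black tree
 * keyed on the recorded allocation site f: putleakinfo() aggregates a
 * total size and count per site, and dump_leaks() prints the tree and
 * mirrors it into the page-sized malloc_leaks array.
 */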
#ifdef MALLOC_STATS

struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RBT_HEAD(leaktree, leaknode) leakhead;
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static unsigned int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE, 0);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	unsigned int i = 0;

	dprintf(fd, "Leak report\n");
	dprintf(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE, 0);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RBT_FOREACH(p, leaktree, &leakhead) {
		dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}
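/*
 * Illustrative sketch of the aggregation performed by putleakinfo()
 * above (the values are invented for the example): repeated records
 * from the same call site f collapse into a single leaknode keyed on f.
 *
 *	putleakinfo(f, 32, 1);	// total_size = 32, count = 1
 *	putleakinfo(f, 32, 2);	// total_size = 96, count = 3
 *	// dump_leaks() then reports sum 96, # 3, avg 96 / 3 = 32
 */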
static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	while (p != NULL) {
		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			dprintf(fd, "        ");
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	int i, j, count;
	struct chunk_info *p;

	dprintf(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			dprintf(fd, "%2d) %3d ", i, count);
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				dprintf(fd, "\n");
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	struct smallcache *cache;
	size_t i, total = 0;

	dprintf(fd, "Cached in small cache:\n");
	for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
		cache = &d->smallcache[i];
		if (cache->length != 0)
			dprintf(fd, "%zu(%u): %u = %zu\n", i + 1, cache->max,
			    cache->length, cache->length * (i + 1));
		total += cache->length * (i + 1);
	}

	dprintf(fd, "Cached in big cache: %zu/%zu\n", d->bigcache_used,
	    d->bigcache_size);
	for (i = 0; i < d->bigcache_size; i++) {
		if (d->bigcache[i].psize != 0)
			dprintf(fd, "%zu: %zu\n", i, d->bigcache[i].psize);
		total += d->bigcache[i].psize;
	}
	dprintf(fd, "Free pages cached: %zu\n", total);
}

static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	size_t i, realsize;

	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
	if (d == NULL)
		return;
	dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
	dprintf(fd, "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
	dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
	dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves);
	dprintf(fd, "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	dprintf(fd, "Other pool searches %zu/%zu\n",
	    d->other_pool, d->pool_searches);
	dprintf(fd, "In use %zu\n", d->malloc_used);
	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	dprintf(fd,
	    "slot)  hash d  type               page                  f "
	    "size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			dprintf(fd, "%4zx) #%4zx %zd ",
			    i, h, h - i);
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				dprintf(fd,
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	dprintf(fd, "\n");
}
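/*
 * Reading the region-slot lines printed by malloc_dump1() above: for
 * an occupied slot i, h is the home slot given by the hash and d is
 * h - i, the displacement introduced by collision resolution (0 when
 * the entry sits in its home slot). For a hypothetical 512-slot table:
 *
 *	h = hash(p) & (512 - 1);	// home slot, table size 512
 *	d = h - i;			// 0 if no probing occurred
 */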
void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RBT_INIT(leaktree, &leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

void
malloc_gdump(int fd)
{
	int i;
	int saved_errno = errno;

	for (i = 0; i < mopts.malloc_mutexes; i++)
		malloc_dump(fd, i, mopts.malloc_pool[i]);

	errno = saved_errno;
}
DEF_WEAK(malloc_gdump);

static void
malloc_exit(void)
{
	int save_errno = errno, fd;
	unsigned i;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		dprintf(fd, "******** Start dump %s *******\n", __progname);
		dprintf(fd,
		    "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
		    "G=%zu\n",
		    mopts.malloc_mt, mopts.malloc_mutexes,
		    mopts.internal_funcs, mopts.malloc_freecheck,
		    mopts.malloc_freeunmap, mopts.def_malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.def_maxcache,
		    mopts.malloc_guard);

		for (i = 0; i < mopts.malloc_mutexes; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		dprintf(fd, "******** End dump %s *******\n", __progname);
		close(fd);
	} else
		dprintf(STDERR_FILENO,
		    "malloc() warning: Couldn't dump stats\n");
	errno = save_errno;
}

#endif /* MALLOC_STATS */
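/*
 * A minimal usage sketch for recallocarray() as implemented in this
 * file; the function is an OpenBSD extension, so portable code should
 * guard it with a feature test. Growing a zeroed array from 4 to 8
 * members preserves the old contents, zero-fills the new tail, and
 * explicitly clears the old region before freeing it.
 *
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		int *a = calloc(4, sizeof(*a));
 *
 *		if (a == NULL)
 *			return 1;
 *		a = recallocarray(a, 4, 8, sizeof(*a));
 *		if (a == NULL)
 *			return 1;
 *		free(a);
 *		return 0;
 *	}
 */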