/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define REDZONE_MINSIZE 16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> lg_dirty_mult) >= ndirty
 *
 * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
 * many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 3

typedef enum {
    purge_mode_ratio = 0,
    purge_mode_decay = 1,

    purge_mode_limit = 2
} purge_mode_t;
#define PURGE_DEFAULT purge_mode_ratio
/* Default decay time in seconds. */
#define DECAY_TIME_DEFAULT 10
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE 1000

typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
    /* Index of bin this run is associated with. */
    szind_t binind;

    /* Number of free regions in run. */
    unsigned nfree;

    /* Per region allocated/deallocated bitmap. */
    bitmap_t bitmap[BITMAP_GROUPS_MAX];
};

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_bits_s {
    /*
     * Run address (or size) and various flags are stored together.  The
     * bit layout looks like (assuming 32-bit system):
     *
     *   ???????? ???????? ???nnnnn nnndumla
     *
     * ? : Unallocated: Run address for first/last pages, unset for
     *     internal pages.
     *     Small: Run page offset.
     *     Large: Run page count for first page, unset for trailing pages.
     * n : binind for small size class, BININD_INVALID for large size
     *     class.
     * d : dirty?
     * u : unzeroed?
     * m : decommitted?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.
     *
     * p : run page offset
     * s : run size
     * n : binind for size class; large objects set these to BININD_INVALID
     * x : don't care
     * - : 0
     * + : 1
     * [DUMLA] : bit set
     * [dumla] : bit unset
     *
     *   Unallocated (clean):
     *     ssssssss ssssssss sss+++++ +++dum-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
     *     ssssssss ssssssss sss+++++ +++dUm-a
     *
     *   Unallocated (dirty):
     *     ssssssss ssssssss sss+++++ +++D-m-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     ssssssss ssssssss sss+++++ +++D-m-a
     *
     *   Small:
     *     pppppppp pppppppp pppnnnnn nnnd---A
     *     pppppppp pppppppp pppnnnnn nnn----A
     *     pppppppp pppppppp pppnnnnn nnnd---A
     *
     *   Large:
     *     ssssssss ssssssss sss+++++ +++D--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     *
     *   Large (sampled, size <= LARGE_MINCLASS):
     *     ssssssss ssssssss sssnnnnn nnnD--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     *
     *   Large (not sampled, size == LARGE_MINCLASS):
     *     ssssssss ssssssss sss+++++ +++D--LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ---+++++ +++D--LA
     */
    size_t bits;
#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
#define CHUNK_MAP_LARGE ((size_t)0x02U)
#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)

#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
#define CHUNK_MAP_DIRTY ((size_t)0x10U)
#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)

#define CHUNK_MAP_BININD_SHIFT 5
#define BININD_INVALID ((size_t)0xffU)
#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK

#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
#define CHUNK_MAP_SIZE_MASK \
    (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
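
/*
 * Illustrative mapbits encodings, assuming LG_PAGE == 12 (so that
 * CHUNK_MAP_RUNIND_SHIFT == 13 and CHUNK_MAP_SIZE_SHIFT == 1):
 *
 *   Small-run page at run page offset 3, bin index 5:
 *     (3 << CHUNK_MAP_RUNIND_SHIFT) | (5 << CHUNK_MAP_BININD_SHIFT) |
 *     CHUNK_MAP_ALLOCATED == 0x60a1
 *
 *   Clean, committed, zeroed unallocated run of 2 pages (size 0x2000):
 *     (0x2000 << CHUNK_MAP_SIZE_SHIFT) | CHUNK_MAP_BININD_INVALID == 0x5fe0
 */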

struct arena_runs_dirty_link_s {
    qr(arena_runs_dirty_link_t) rd_link;
};

/*
 * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
 * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
 * chunk header in order to improve cache locality.
 */
struct arena_chunk_map_misc_s {
    /*
     * Linkage for run heaps.  There are two disjoint uses:
     *
     * 1) arena_t's runs_avail heaps.
     * 2) arena_run_t conceptually uses this linkage for in-use non-full
     *    runs, rather than directly embedding linkage.
     */
    phn(arena_chunk_map_misc_t) ph_link;

    union {
        /* Linkage for list of dirty runs. */
        arena_runs_dirty_link_t rd;

        /* Profile counters, used for large object runs. */
        union {
            void *prof_tctx_pun;
            prof_tctx_t *prof_tctx;
        };

        /* Small region run metadata. */
        arena_run_t run;
    };
};
typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */

#ifdef JEMALLOC_ARENA_STRUCTS_B
/* Arena chunk header. */
struct arena_chunk_s {
    /*
     * A pointer to the arena that owns the chunk is stored within the
     * node.  This field as a whole is used by chunks_rtree to support both
     * ivsalloc() and core-based debugging.
     */
    extent_node_t node;

    /*
     * Map of pages within chunk that keeps track of free/large/small.  The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map.  This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
    arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | pad?               |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad
 * is either 0 or redzone_size; it is present only if needed to align
 * reg0_offset.
 */
struct arena_bin_info_s {
    /* Size of regions in a run for this bin's size class. */
    size_t reg_size;

    /* Redzone size. */
    size_t redzone_size;

    /* Interval between regions (reg_size + (redzone_size << 1)). */
    size_t reg_interval;

    /* Total size of a run for this bin's size class. */
    size_t run_size;

    /* Total number of regions in a run for this bin's size class. */
    uint32_t nregs;

    /*
     * Metadata used to manipulate bitmaps for runs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;

    /* Offset of first region in a run for this bin's size class. */
    uint32_t reg0_offset;
};
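
/*
 * Worked example for a hypothetical bin (assuming redzones are enabled, with
 * reg_size == 32 and redzone_size == 16): reg_interval == 32 + (16 << 1) ==
 * 64, and region i begins reg0_offset + i * reg_interval bytes into the run.
 * arena_run_regind() inverts this mapping from a pointer back to i.
 */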

struct arena_bin_s {
    /*
     * All operations on runcur, runs, and stats require that lock be
     * locked.  Run allocation/deallocation are protected by the arena
     * lock, which may be acquired while holding one or more bin locks, but
     * not vice versa.
     */
    malloc_mutex_t lock;

    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t *runcur;

    /*
     * Heap of non-full runs.  This heap is used when looking for an
     * existing run when runcur is no longer usable.  We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_heap_t runs;

    /* Bin statistics. */
    malloc_bin_stats_t stats;
};

struct arena_s {
    /* This arena's index within the arenas array. */
    unsigned ind;

    /*
     * Number of threads currently assigned to this arena, synchronized via
     * atomic operations.  Each thread has two distinct assignments, one
     * for application-serving allocation, and the other for internal
     * metadata allocation.  Internal metadata must not be allocated from
     * arenas created via the arenas.extend mallctl, because the
     * arena.<i>.reset mallctl indiscriminately discards all allocations
     * for the affected arena.
     *
     *   0: Application allocation.
     *   1: Internal metadata allocation.
     */
    unsigned nthreads[2];

    /*
     * There are three classes of arena operations from a locking
     * perspective:
     * 1) Thread assignment (modifies nthreads) is synchronized via
     *    atomics.
     * 2) Bin-related operations are protected by bin locks.
     * 3) Chunk- and run-related operations are protected by this mutex.
     */
    malloc_mutex_t lock;

    arena_stats_t stats;
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit if
     * opt_stats_print is enabled.
     */
    ql_head(tcache_t) tcache_ql;

    uint64_t prof_accumbytes;

    /*
     * PRNG state for cache index randomization of large allocation base
     * pointers.
     */
    uint64_t offset_state;

    dss_prec_t dss_prec;

    /* Extant arena chunks. */
    ql_head(extent_node_t) achunks;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk.  The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t *spare;

    /* Minimum ratio (log base 2) of nactive:ndirty. */
    ssize_t lg_dirty_mult;

    /* True if a thread is currently executing arena_purge_to_limit(). */
    bool purging;

    /* Number of pages in active runs and huge regions. */
    size_t nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t ndirty;
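
    /*
     * Illustrative ratio-mode purging arithmetic: with the default
     * lg_dirty_mult == 3 and nactive == 4096, purging aims to keep ndirty
     * at or below nactive >> lg_dirty_mult == 512.
     */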

    /*
     * Unused dirty memory this arena manages.  Dirty memory is
     * conceptually tracked as an arbitrarily interleaved LRU of dirty runs
     * and cached chunks, but the list linkage is actually semi-duplicated
     * in order to avoid extra arena_chunk_map_misc_t space overhead.
     *
     *   LRU-----------------------------------------------------------MRU
     *
     *        /-- arena ---\
     *        |            |
     *        |            |
     *        |------------|                             /- chunk -\
     *   ...->|chunks_cache|<--------------------------->|  /----\  |<--...
     *        |------------|                             |  |node|  |
     *        |            |                             |  |    |  |
     *        |            |    /- run -\    /- run -\   |  |    |  |
     *        |            |    |       |    |       |   |  |    |  |
     *        |            |    |       |    |       |   |  |    |  |
     *        |------------|    |-------|    |-------|   |  |----|  |
     *   ...->|runs_dirty  |<-->|rd     |<-->|rd     |<-->|rd      |<--...
     *        |------------|    |-------|    |-------|   |  |----|  |
     *        |            |    |       |    |       |   |  |    |  |
     *        |            |    |       |    |       |   |  \----/  |
     *        |            |    \-------/    \-------/   |          |
     *        |            |                             |          |
     *        |            |                             |          |
     *        \------------/                             \----------/
     */
    arena_runs_dirty_link_t runs_dirty;
    extent_node_t chunks_cache;

    /*
     * Approximate time in seconds from the creation of a set of unused
     * dirty pages until an equivalent set of unused dirty pages is purged
     * and/or reused.
     */
    ssize_t decay_time;
    /* decay_time / SMOOTHSTEP_NSTEPS. */
    nstime_t decay_interval;
    /*
     * Time at which the current decay interval logically started.  We do
     * not actually advance to a new epoch until sometime after it starts
     * because of scheduling and computation delays, and it is even
     * possible to completely skip epochs.  In all cases, during epoch
     * advancement we merge all relevant activity into the most recently
     * recorded epoch.
     */
    nstime_t decay_epoch;
    /* decay_deadline randomness generator. */
    uint64_t decay_jitter_state;
    /*
     * Deadline for current epoch.  This is the sum of decay_interval and
     * per epoch jitter which is a uniform random variable in
     * [0..decay_interval).  Epochs always advance by precise multiples of
     * decay_interval, but we randomize the deadline to reduce the
     * likelihood of arenas purging in lockstep.
     */
    nstime_t decay_deadline;
    /*
     * Number of dirty pages at beginning of current epoch.  During epoch
     * advancement we use the delta between decay_ndirty and ndirty to
     * determine how many dirty pages, if any, were generated, and record
     * the result in decay_backlog.
     */
    size_t decay_ndirty;
    /*
     * Memoized result of arena_decay_backlog_npages_limit() corresponding
     * to the current contents of decay_backlog, i.e. the limit on how many
     * pages are allowed to exist for the decay epochs.
     */
    size_t decay_backlog_npages_limit;
    /*
     * Trailing log of how many unused dirty pages were generated during
     * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
     * element is the most recent epoch.  Corresponding epoch times are
     * relative to decay_epoch.
     */
    size_t decay_backlog[SMOOTHSTEP_NSTEPS];
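
    /*
     * Illustrative decay timing, assuming SMOOTHSTEP_NSTEPS == 200: with
     * the default decay_time == 10, decay_interval is 50 ms, and dirty
     * pages recorded in a given epoch stop contributing to the backlog
     * (and thus become purgeable) roughly 10 seconds later.
     */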

    /* Extant huge allocations. */
    ql_head(extent_node_t) huge;
    /* Synchronizes all huge allocation/update/deallocation. */
    malloc_mutex_t huge_mtx;

    /*
     * Trees of chunks that were previously allocated (trees differ only in
     * node ordering).  These are used when allocating chunks, in an
     * attempt to re-use address space.  Depending on function, different
     * tree orderings are needed, which is why there are two trees with the
     * same contents.
     */
    extent_tree_t chunks_szad_cached;
    extent_tree_t chunks_ad_cached;
    extent_tree_t chunks_szad_retained;
    extent_tree_t chunks_ad_retained;

    malloc_mutex_t chunks_mtx;
    /* Cache of nodes that were allocated via base_alloc(). */
    ql_head(extent_node_t) node_cache;
    malloc_mutex_t node_cache_mtx;

    /* User-configurable chunk hook functions. */
    chunk_hooks_t chunk_hooks;

    /* bins is used to store trees of free regions. */
    arena_bin_t bins[NBINS];

    /*
     * Quantized address-ordered heaps of this arena's available runs.  The
     * heaps are used for first-best-fit run allocation.
     */
    arena_run_heap_t runs_avail[1]; /* Dynamically sized. */
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
    ticker_t decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

static const size_t large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

extern purge_mode_t opt_purge;
extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
extern ssize_t opt_decay_time;

extern arena_bin_info_t arena_bin_info[NBINS];

extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t large_maxclass; /* Max large size class. */
extern size_t run_quantize_max; /* Max run_quantize_*() input. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */

#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
extern run_quantize_t *run_quantize_floor;
extern run_quantize_t *run_quantize_ceil;
#endif
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
    bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
    bool cache);
extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t usize);
void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
    void *chunk, size_t oldsize, size_t usize, bool *zero);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t lg_dirty_mult);
ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
    bool zero);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr);
void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t size, size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
bool arena_boot(void);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_bits_t *arena_bitselm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
    size_t pageind);
const arena_chunk_map_misc_t *arena_miscelm_get_const(
    const arena_chunk_t *chunk, size_t pageind);
size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbitsp_read(const size_t *mapbitsp);
size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
    size_t pageind);
szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
    size_t flags);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx);
void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (&chunk->map_bits[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
{

    return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
        (uintptr_t)map_misc_offset) + pageind-map_bias);
}

JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
{
#if 1 /* Work around gcc bug. */
    arena_chunk_t *mchunk = (arena_chunk_t *)chunk;

    return (arena_miscelm_get_mutable(mchunk, pageind));
#else
    return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
#endif
}

JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
        map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (pageind);
}

JEMALLOC_ALWAYS_INLINE void *
arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
{
    arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
        *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));

    assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
    assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

    return (miscelm);
}

JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_run_to_miscelm(arena_run_t *run)
{
    arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
        *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));

    assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
    assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);

    return (miscelm);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{

    return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE const size_t *
arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
{
#if 1 /* Work around gcc bug. */
    arena_chunk_t *mchunk = (arena_chunk_t *)chunk;

    return (arena_mapbitsp_get_mutable(mchunk, pageind));
#else
    return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
#endif
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(const size_t *mapbitsp)
{

    return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{

    return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
    size_t size;

#if CHUNK_MAP_SIZE_SHIFT > 0
    size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
    size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
    size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif

    return (size);
}
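
/*
 * Illustrative size encoding, assuming LG_PAGE == 12 (so CHUNK_MAP_SIZE_SHIFT
 * == 1): a 3-page run size of 0x3000 is stored as 0x3000 <<
 * CHUNK_MAP_SIZE_SHIFT == 0x6000 in the size bits, and
 * arena_mapbits_size_decode() masks and shifts it back to 0x3000.  The #if
 * chains in the encode/decode functions handle configurations where the shift
 * is zero or negative.
 */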

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
    return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        CHUNK_MAP_ALLOCATED);
    return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;
    szind_t binind;

    mapbits = arena_mapbits_get(chunk, pageind);
    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    assert(binind < NBINS || binind == BININD_INVALID);
    return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    return (mapbits & CHUNK_MAP_DECOMMITTED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

    *mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
    size_t mapbits;

#if CHUNK_MAP_SIZE_SHIFT > 0
    mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
    mapbits = size;
#else
    mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif

    assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
    return (mapbits);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
    assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert((size & PAGE_MASK) == 0);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        (mapbits & ~CHUNK_MAP_SIZE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

    assert((flags & CHUNK_MAP_UNZEROED) == flags);
    arena_mapbitsp_write(mapbitsp, flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
    assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
        (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
        CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
        CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    szind_t binind)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
    size_t mapbits = arena_mapbitsp_read(mapbitsp);

    assert(binind <= BININD_INVALID);
    assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
        large_pad);
    arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
        (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    szind_t binind, size_t flags)
{
    size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);

    assert(binind < BININD_INVALID);
    assert(pageind - runind >= map_bias);
    assert((flags & CHUNK_MAP_UNZEROED) == flags);
    arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
        (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_INLINE void
arena_metadata_allocated_add(arena_t *arena, size_t size)
{

    atomic_add_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE void
arena_metadata_allocated_sub(arena_t *arena, size_t size)
{

    atomic_sub_z(&arena->stats.metadata_allocated, size);
}

JEMALLOC_INLINE size_t
arena_metadata_allocated_get(arena_t *arena)
{

    return (atomic_read_z(&arena->stats.metadata_allocated));
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);
    assert(prof_interval != 0);

    arena->prof_accumbytes += accumbytes;
    if (arena->prof_accumbytes >= prof_interval) {
        arena->prof_accumbytes -= prof_interval;
        return (true);
    }
    return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);
    return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{

    cassert(config_prof);

    if (likely(prof_interval == 0))
        return (false);

    {
        bool ret;

        malloc_mutex_lock(tsdn, &arena->lock);
        ret = arena_prof_accum_impl(arena, accumbytes);
        malloc_mutex_unlock(tsdn, &arena->lock);
        return (ret);
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
    szind_t binind;

    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

    if (config_debug) {
        arena_chunk_t *chunk;
        arena_t *arena;
        size_t pageind;
        size_t actual_mapbits;
        size_t rpages_ind;
        const arena_run_t *run;
        arena_bin_t *bin;
        szind_t run_binind, actual_binind;
        arena_bin_info_t *bin_info;
        const arena_chunk_map_misc_t *miscelm;
        const void *rpages;

        assert(binind != BININD_INVALID);
        assert(binind < NBINS);
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = extent_node_arena_get(&chunk->node);
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        actual_mapbits = arena_mapbits_get(chunk, pageind);
        assert(mapbits == actual_mapbits);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
            pageind);
        miscelm = arena_miscelm_get_const(chunk, rpages_ind);
        run = &miscelm->run;
        run_binind = run->binind;
        bin = &arena->bins[run_binind];
        actual_binind = (szind_t)(bin - arena->bins);
        assert(run_binind == actual_binind);
        bin_info = &arena_bin_info[actual_binind];
        rpages = arena_miscelm_to_rpages(miscelm);
        assert(((uintptr_t)ptr - ((uintptr_t)rpages +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
            == 0);
    }

    return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
    szind_t binind = (szind_t)(bin - arena->bins);
    assert(binind < NBINS);
    return (binind);
}

JEMALLOC_INLINE size_t
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
    size_t diff, interval, shift, regind;
    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
    void *rpages = arena_miscelm_to_rpages(miscelm);

    /*
     * Freeing a pointer lower than region zero can cause assertion
     * failure.
     */
    assert((uintptr_t)ptr >= (uintptr_t)rpages +
        (uintptr_t)bin_info->reg0_offset);

    /*
     * Avoid doing division with a variable divisor if possible.  Using
     * actual division here can reduce allocator throughput by over 20%!
     */
    diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
        bin_info->reg0_offset);

    /* Rescale (factor powers of 2 out of the numerator and denominator). */
    interval = bin_info->reg_interval;
    shift = ffs_zu(interval) - 1;
    diff >>= shift;
    interval >>= shift;

    if (interval == 1) {
        /* The divisor was a power of 2. */
        regind = diff;
    } else {
        /*
         * To divide by a number D that is not a power of two we
         * multiply by (2^SIZE_INV_SHIFT / D) and then right shift by
         * SIZE_INV_SHIFT positions.
         *
         *   X / D
         *
         * becomes
         *
         *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
         *
         * We can omit the first three elements, because we never
         * divide by 0, and 1 and 2 are both powers of two, which are
         * handled above.
         */
#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
        static const size_t interval_invs[] = {
            SIZE_INV(3),
            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
            SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
            SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
            SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
            SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
            SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
        };

        if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
            + 2))) {
            regind = (diff * interval_invs[interval - 3]) >>
                SIZE_INV_SHIFT;
        } else
            regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
    }
    assert(diff == regind * interval);
    assert(regind < bin_info->nregs);

    return (regind);
}
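
/*
 * Worked example of the rescale-and-multiply trick above, assuming 64-bit
 * size_t, LG_PAGE == 12 and LG_TINY_MIN == 3 (so LG_RUN_MAXREGS == 9 and
 * SIZE_INV_SHIFT == 55): for a hypothetical bin with reg_interval == 48 and a
 * pointer into region 5, diff == 240.  ffs_zu(48) - 1 == 4, so diff becomes
 * 15 and interval becomes 3, and (15 * SIZE_INV(3)) >> 55 == 5 == regind.
 */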

JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
    prof_tctx_t *ret;
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        size_t mapbits = arena_mapbits_get(chunk, pageind);
        assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
        if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
            ret = (prof_tctx_t *)(uintptr_t)1U;
        else {
            arena_chunk_map_misc_t *elm =
                arena_miscelm_get_mutable(chunk, pageind);
            ret = atomic_read_p(&elm->prof_tctx_pun);
        }
    } else
        ret = huge_prof_tctx_get(tsdn, ptr);

    return (ret);
}

JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{
    arena_chunk_t *chunk;

    cassert(config_prof);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;

        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

        if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
            (uintptr_t)1U)) {
            arena_chunk_map_misc_t *elm;

            assert(arena_mapbits_large_get(chunk, pageind) != 0);

            elm = arena_miscelm_get_mutable(chunk, pageind);
            atomic_write_p(&elm->prof_tctx_pun, tctx);
        } else {
            /*
             * tctx must always be initialized for large runs.
             * Assert that the surrounding conditional logic is
             * equivalent to checking whether ptr refers to a large
             * run.
             */
            assert(arena_mapbits_large_get(chunk, pageind) == 0);
        }
    } else
        huge_prof_tctx_set(tsdn, ptr, tctx);
}

JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
        (uintptr_t)old_tctx > (uintptr_t)1U))) {
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (likely(chunk != ptr)) {
            size_t pageind;
            arena_chunk_map_misc_t *elm;

            pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                LG_PAGE;
            assert(arena_mapbits_allocated_get(chunk, pageind) !=
                0);
            assert(arena_mapbits_large_get(chunk, pageind) != 0);

            elm = arena_miscelm_get_mutable(chunk, pageind);
            atomic_write_p(&elm->prof_tctx_pun,
                (prof_tctx_t *)(uintptr_t)1U);
        } else
            huge_prof_tctx_reset(tsdn, ptr);
    }
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
    tsd_t *tsd;
    ticker_t *decay_ticker;

    if (unlikely(tsdn_null(tsdn)))
        return;
    tsd = tsdn_tsd(tsdn);
    decay_ticker = decay_ticker_get(tsd, arena->ind);
    if (unlikely(decay_ticker == NULL))
        return;
    if (unlikely(ticker_ticks(decay_ticker, nticks)))
        arena_purge(tsdn, arena, false);
}

JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{

    arena_decay_ticks(tsdn, arena, 1);
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path)
{

    assert(!tsdn_null(tsdn) || tcache == NULL);
    assert(size != 0);

    if (likely(tcache != NULL)) {
        if (likely(size <= SMALL_MAXCLASS)) {
            return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
                tcache, size, ind, zero, slow_path));
        }
        if (likely(size <= tcache_maxclass)) {
            return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
                tcache, size, ind, zero, slow_path));
        }
        /* (size > tcache_maxclass) case falls through. */
        assert(size > tcache_maxclass);
    }

    return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}

JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(const void *ptr)
{
    arena_chunk_t *chunk;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr))
        return (extent_node_arena_get(&chunk->node));
    else
        return (huge_aalloc(ptr));
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;
    size_t pageind;
    szind_t binind;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        binind = arena_mapbits_binind_get(chunk, pageind);
        if (unlikely(binind == BININD_INVALID || (config_prof && !demote
            && arena_mapbits_large_get(chunk, pageind) != 0))) {
            /*
             * Large allocation.  In the common case (demote), and
             * as this is an inline function, most callers will only
             * end up looking at binind to determine that ptr is a
             * small allocation.
             */
            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);
            ret = arena_mapbits_large_size_get(chunk, pageind) -
                large_pad;
            assert(ret != 0);
            assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
                chunk_npages);
            assert(arena_mapbits_dirty_get(chunk, pageind) ==
                arena_mapbits_dirty_get(chunk,
                pageind+((ret+large_pad)>>LG_PAGE)-1));
        } else {
            /*
             * Small allocation (possibly promoted to a large
             * object).
             */
            assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
                arena_ptr_small_binind_get(ptr,
                arena_mapbits_get(chunk, pageind)) == binind);
            ret = index2size(binind);
        }
    } else
        ret = huge_salloc(tsdn, ptr);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    assert(!tsdn_null(tsdn) || tcache == NULL);
    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        mapbits = arena_mapbits_get(chunk, pageind);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
            /* Small allocation. */
            if (likely(tcache != NULL)) {
                szind_t binind = arena_ptr_small_binind_get(ptr,
                    mapbits);
                tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
                    binind, slow_path);
            } else {
                arena_dalloc_small(tsdn,
                    extent_node_arena_get(&chunk->node), chunk,
                    ptr, pageind);
            }
        } else {
            size_t size = arena_mapbits_large_size_get(chunk,
                pageind);

            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);

            if (likely(tcache != NULL) && size - large_pad <=
                tcache_maxclass) {
                tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
                    size - large_pad, slow_path);
            } else {
                arena_dalloc_large(tsdn,
                    extent_node_arena_get(&chunk->node), chunk,
                    ptr);
            }
        }
    } else
        huge_dalloc(tsdn, ptr);
}

JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{
    arena_chunk_t *chunk;

    assert(!tsdn_null(tsdn) || tcache == NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr)) {
        if (config_prof && opt_prof) {
            size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
                LG_PAGE;
            assert(arena_mapbits_allocated_get(chunk, pageind) !=
                0);
            if (arena_mapbits_large_get(chunk, pageind) != 0) {
                /*
                 * Make sure to use promoted size, not request
                 * size.
                 */
                size = arena_mapbits_large_size_get(chunk,
                    pageind) - large_pad;
            }
        }
        assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));

        if (likely(size <= SMALL_MAXCLASS)) {
            /* Small allocation. */
            if (likely(tcache != NULL)) {
                szind_t binind = size2index(size);
                tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
                    binind, slow_path);
            } else {
                size_t pageind = ((uintptr_t)ptr -
                    (uintptr_t)chunk) >> LG_PAGE;
                arena_dalloc_small(tsdn,
                    extent_node_arena_get(&chunk->node), chunk,
                    ptr, pageind);
            }
        } else {
            assert(config_cache_oblivious || ((uintptr_t)ptr &
                PAGE_MASK) == 0);

            if (likely(tcache != NULL) && size <= tcache_maxclass) {
                tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
                    size, slow_path);
            } else {
                arena_dalloc_large(tsdn,
                    extent_node_arena_get(&chunk->node), chunk,
                    ptr);
            }
        }
    } else
        huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/