arena.h revision 235322
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints. The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant). This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
 */
#define RUN_BFP 12
/*                          \/ Implicit binary fixed point. */
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U
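
/*
 * Worked example (editor's illustration, not part of the original header):
 * with RUN_BFP == 12, overhead ratios are fixed-point values in units of
 * 1/4096. RUN_MAX_OVRHD == 0x3d == 61 therefore caps run header overhead at
 * 61/4096, roughly 1.5%; a candidate run size is acceptable when
 *
 *   (run_hdr_size << RUN_BFP) / run_size <= RUN_MAX_OVRHD
 */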

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS 11
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size. Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define REDZONE_MINSIZE 16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT 5

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
    /*
     * Overlay prof_ctx in order to allow it to be referenced by dead code.
     * Such antics aren't warranted for per arena data structures, but
     * chunk map overhead accounts for a percentage of memory, rather than
     * being just a fixed cost.
     */
    union {
#endif
    union {
        /*
         * Linkage for run trees. There are two disjoint uses:
         *
         * 1) arena_t's runs_avail_{clean,dirty} trees.
         * 2) arena_run_t conceptually uses this linkage for in-use
         *    non-full runs, rather than directly embedding linkage.
         */
        rb_node(arena_chunk_map_t) rb_link;
        /*
         * List of runs currently in purgatory. arena_chunk_purge()
         * temporarily allocates runs that contain dirty pages while
         * purging, so that other threads cannot use the runs while the
         * purging thread is operating without the arena lock held.
         */
        ql_elm(arena_chunk_map_t) ql_link;
    } u;

    /* Profile counters, used for large object runs. */
    prof_ctx_t *prof_ctx;
#ifndef JEMALLOC_PROF
    }; /* union { ... }; */
#endif

    /*
     * Run address (or size) and various flags are stored together. The bit
     * layout looks like (assuming 32-bit system):
     *
     *   ???????? ???????? ????nnnn nnnndula
     *
     * ? : Unallocated: Run address for first/last pages, unset for internal
     *                  pages.
     *     Small: Run page offset.
     *     Large: Run size for first page, unset for trailing pages.
     * n : binind for small size class, BININD_INVALID for large size class.
     * d : dirty?
     * u : unzeroed?
     * l : large?
     * a : allocated?
     *
     * Following are example bit patterns for the three types of runs.
     *
     * p : run page offset
     * s : run size
     * n : binind for size class; large objects set these to BININD_INVALID
     *     except for promoted allocations (see prof_promote)
     * x : don't care
     * - : 0
     * + : 1
     * [DULA] : bit set
     * [dula] : bit unset
     *
     *   Unallocated (clean):
     *     ssssssss ssssssss ssss++++ ++++du-a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
     *     ssssssss ssssssss ssss++++ ++++dU-a
     *
     *   Unallocated (dirty):
     *     ssssssss ssssssss ssss++++ ++++D--a
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     ssssssss ssssssss ssss++++ ++++D--a
     *
     *   Small:
     *     pppppppp pppppppp ppppnnnn nnnnd--A
     *     pppppppp pppppppp ppppnnnn nnnn---A
     *     pppppppp pppppppp ppppnnnn nnnnd--A
     *
     *   Large:
     *     ssssssss ssssssss ssss++++ ++++D-LA
     *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
     *     -------- -------- ----++++ ++++D-LA
     *
     *   Large (sampled, size <= PAGE):
     *     ssssssss ssssssss ssssnnnn nnnnD-LA
     *
     *   Large (not sampled, size == PAGE):
     *     ssssssss ssssssss ssss++++ ++++D-LA
     */
    size_t bits;
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID ((size_t)0xffU)
/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
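
/*
 * Decoding example (editor's illustration): a map entry whose bits field
 * equals
 *
 *   (3 << LG_PAGE) | (5 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_ALLOCATED
 *
 * describes the page at run page offset 3 of an in-use small run whose
 * regions belong to bin 5; the page is clean (D unset), zeroed (U unset),
 * and not large (L unset).
 */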

/* Arena chunk header. */
struct arena_chunk_s {
    /* Arena that owns the chunk. */
    arena_t *arena;

    /* Linkage for the arena's chunks_dirty list. */
    ql_elm(arena_chunk_t) link_dirty;

    /*
     * True if the chunk is currently in the chunks_dirty list, due to
     * having at some point contained one or more dirty pages. Removal
     * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
     */
    bool dirtied;

    /* Number of dirty pages. */
    size_t ndirty;

    /*
     * Map of pages within chunk that keeps track of free/large/small. The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map. This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
    arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
    /* Bin this run is associated with. */
    arena_bin_t *bin;

    /* Index of next region that has never been allocated, or nregs. */
    uint32_t nextind;

    /* Number of free regions in run. */
    unsigned nfree;
};
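
/*
 * Editor's note (an illustration; the 4 KiB page and 4 MiB chunk sizes are
 * assumptions): with chunk_npages == 1024, the chunk header and its map fit
 * in the first map_bias pages, map[0] describes the page at index map_bias,
 * and the entry for page pageind lives at map[pageind - map_bias] (see
 * arena_mapp_get() below).
 */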

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | arena_run_t header |
 *               | ...                |
 * bitmap_offset | bitmap             |
 *               | ...                |
 *   ctx0_offset | ctx map            |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on. Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
    /* Size of regions in a run for this bin's size class. */
    size_t reg_size;

    /* Redzone size. */
    size_t redzone_size;

    /* Interval between regions (reg_size + (redzone_size << 1)). */
    size_t reg_interval;

    /* Total size of a run for this bin's size class. */
    size_t run_size;

    /* Total number of regions in a run for this bin's size class. */
    uint32_t nregs;

    /*
     * Offset of first bitmap_t element in a run header for this bin's size
     * class.
     */
    uint32_t bitmap_offset;

    /*
     * Metadata used to manipulate bitmaps for runs associated with this
     * bin.
     */
    bitmap_info_t bitmap_info;

    /*
     * Offset of first (prof_ctx_t *) in a run header for this bin's size
     * class, or 0 if (config_prof == false || opt_prof == false).
     */
    uint32_t ctx0_offset;

    /* Offset of first region in a run for this bin's size class. */
    uint32_t reg0_offset;
};
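
/*
 * Editor's illustration: given the layout above, region regind of a run
 * starts at
 *
 *   (void *)((uintptr_t)run + bin_info->reg0_offset +
 *       (regind * bin_info->reg_interval))
 *
 * arena_run_regind() below inverts this mapping, recovering regind from a
 * region pointer.
 */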

struct arena_bin_s {
    /*
     * All operations on runcur, runs, and stats require that lock be
     * locked. Run allocation/deallocation are protected by the arena lock,
     * which may be acquired while holding one or more bin locks, but not
     * vice versa.
     */
    malloc_mutex_t lock;

    /*
     * Current run being used to service allocations of this bin's size
     * class.
     */
    arena_run_t *runcur;

    /*
     * Tree of non-full runs. This tree is used when looking for an
     * existing run when runcur is no longer usable. We choose the
     * non-full run that is lowest in memory; this policy tends to keep
     * objects packed well, and it can also help reduce the number of
     * almost-empty chunks.
     */
    arena_run_tree_t runs;

    /* Bin statistics. */
    malloc_bin_stats_t stats;
};

struct arena_s {
    /* This arena's index within the arenas array. */
    unsigned ind;

    /*
     * Number of threads currently assigned to this arena. This field is
     * protected by arenas_lock.
     */
    unsigned nthreads;

    /*
     * There are three classes of arena operations from a locking
     * perspective:
     * 1) Thread assignment (modifies nthreads) is protected by
     *    arenas_lock.
     * 2) Bin-related operations are protected by bin locks.
     * 3) Chunk- and run-related operations are protected by this mutex.
     */
    malloc_mutex_t lock;

    arena_stats_t stats;
    /*
     * List of tcaches for extant threads associated with this arena.
     * Stats from these are merged incrementally, and at exit.
     */
    ql_head(tcache_t) tcache_ql;

    uint64_t prof_accumbytes;

    /* List of dirty-page-containing chunks this arena manages. */
    ql_head(arena_chunk_t) chunks_dirty;

    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
     * oscillates right on the cusp of needing a new chunk, cache the most
     * recently freed chunk. The spare is left in the arena's chunk trees
     * until it is deleted.
     *
     * There is one spare chunk per arena, rather than one spare total, in
     * order to avoid interactions between multiple threads that could make
     * a single spare inadequate.
     */
    arena_chunk_t *spare;

    /* Number of pages in active runs. */
    size_t nactive;

    /*
     * Current count of pages within unused runs that are potentially
     * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
     * By tracking this, we can institute a limit on how much dirty unused
     * memory is mapped for each arena.
     */
    size_t ndirty;

    /*
     * Approximate number of pages being purged. It is possible for
     * multiple threads to purge dirty pages concurrently, and they use
     * npurgatory to indicate the total number of pages all threads are
     * attempting to purge.
     */
    size_t npurgatory;

    /*
     * Size/address-ordered trees of this arena's available runs. The trees
     * are used for first-best-fit run allocation. The dirty tree contains
     * runs with dirty pages (i.e. very likely to have been touched and
     * therefore have associated physical pages), whereas the clean tree
     * contains runs with pages that either have no associated physical
     * pages, or have pages that the kernel may recycle at any time due to
     * previous madvise(2) calls. The dirty tree is used in preference to
     * the clean tree for allocations, because using dirty pages reduces
     * the amount of dirty purging necessary to keep the active:dirty page
     * ratio below the purge threshold.
     */
    arena_avail_tree_t runs_avail_clean;
    arena_avail_tree_t runs_avail_dirty;

    /* bins is used to store trees of free regions. */
    arena_bin_t bins[NBINS];
};
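
/*
 * Editor's illustration (a sketch, not jemalloc's actual purging code):
 * nactive, ndirty, and npurgatory combine with opt_lg_dirty_mult to decide
 * when purging is needed, conceptually:
 *
 *   if ((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty)
 *       ... call madvise(... MADV_DONTNEED) on dirty runs ...
 */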

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t opt_lg_dirty_mult;
/*
 * small_size2bin is a compact lookup table that rounds request sizes up to
 * size classes. In order to reduce cache footprint, the table is compressed,
 * and all accesses are via the SMALL_SIZE2BIN macro.
 */
extern uint8_t const small_size2bin[];
#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
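
/*
 * Editor's illustration: the table stores one entry per (1 << LG_TINY_MIN)
 * bytes of request size. Assuming LG_TINY_MIN == 3 (an 8-byte minimum size
 * class), a 17-byte request reads small_size2bin[(17-1) >> 3] ==
 * small_size2bin[2], which names the smallest bin whose reg_size is >= 17.
 */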

extern arena_bin_info_t arena_bin_info[NBINS];

/* Number of large size classes. */
#define nlclasses (chunk_npages - map_bias)

void arena_purge_all(arena_t *arena);
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind);
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache);
bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{

    assert(pageind >= map_bias);
    assert(pageind < chunk_npages);

    return (&chunk->map[pageind-map_bias]);
}

JEMALLOC_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

    return (&arena_mapp_get(chunk, pageind)->bits);
}

JEMALLOC_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

    return (*arena_mapbitsp_get(chunk, pageind));
}

JEMALLOC_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    return (mapbits & ~PAGE_MASK);
}

JEMALLOC_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
    return (mapbits & ~PAGE_MASK);
}

JEMALLOC_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
        CHUNK_MAP_ALLOCATED);
    return (mapbits >> LG_PAGE);
}

JEMALLOC_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;
    size_t binind;

    mapbits = arena_mapbits_get(chunk, pageind);
    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
    assert(binind < NBINS || binind == BININD_INVALID);
    return (binind);
}

JEMALLOC_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
    size_t mapbits;

    mapbits = arena_mapbits_get(chunk, pageind);
    return (mapbits & CHUNK_MAP_ALLOCATED);
}
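
/*
 * Editor's illustration (a hypothetical helper, not part of jemalloc's API):
 * the getters above compose into simple page-state queries.
 */
JEMALLOC_INLINE const char *
arena_page_state_example(arena_chunk_t *chunk, size_t pageind)
{

    if (arena_mapbits_allocated_get(chunk, pageind) == 0)
        return ("unallocated");
    if (arena_mapbits_large_get(chunk, pageind) != 0)
        return ("large");
    return ("small");
}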

JEMALLOC_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp;

    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    assert((size & PAGE_MASK) == 0);
    assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
    assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
    *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
}

JEMALLOC_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
    size_t *mapbitsp;

    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    assert((size & PAGE_MASK) == 0);
    assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
    *mapbitsp = size | (*mapbitsp & PAGE_MASK);
}

JEMALLOC_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
    size_t *mapbitsp;
    size_t unzeroed;

    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    assert((size & PAGE_MASK) == 0);
    assert((flags & CHUNK_MAP_DIRTY) == flags);
    unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
    *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
        CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
}

JEMALLOC_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind)
{
    size_t *mapbitsp;

    assert(binind <= BININD_INVALID);
    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
    *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
        CHUNK_MAP_BININD_SHIFT);
}

JEMALLOC_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    size_t binind, size_t flags)
{
    size_t *mapbitsp;
    size_t unzeroed;

    assert(binind < BININD_INVALID);
    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    assert(pageind - runind >= map_bias);
    assert((flags & CHUNK_MAP_DIRTY) == flags);
    unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
    *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
        flags | unzeroed | CHUNK_MAP_ALLOCATED;
}

JEMALLOC_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
    size_t *mapbitsp;

    mapbitsp = arena_mapbitsp_get(chunk, pageind);
    *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
}
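
/*
 * Editor's illustration (a hypothetical helper sketching caller usage): per
 * the "Large" bit patterns documented in arena_chunk_map_s, a large run's
 * first page records the run size and its last page records size 0.
 */
JEMALLOC_INLINE void
arena_mark_large_run_example(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{

    arena_mapbits_large_set(chunk, pageind, size, flags);
    if (size > PAGE)
        arena_mapbits_large_set(chunk, pageind + (size >> LG_PAGE) - 1,
            0, flags);
}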

JEMALLOC_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
    size_t binind;

    binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

    if (config_debug) {
        arena_chunk_t *chunk;
        arena_t *arena;
        size_t pageind;
        size_t actual_mapbits;
        arena_run_t *run;
        arena_bin_t *bin;
        size_t actual_binind;
        arena_bin_info_t *bin_info;

        assert(binind != BININD_INVALID);
        assert(binind < NBINS);
        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        arena = chunk->arena;
        pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
        actual_mapbits = arena_mapbits_get(chunk, pageind);
        assert(mapbits == actual_mapbits);
        assert(arena_mapbits_large_get(chunk, pageind) == 0);
        assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
        run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
            (actual_mapbits >> LG_PAGE)) << LG_PAGE));
        bin = run->bin;
        actual_binind = bin - arena->bins;
        assert(binind == actual_binind);
        bin_info = &arena_bin_info[actual_binind];
        assert(((uintptr_t)ptr - ((uintptr_t)run +
            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
            == 0);
    }

    return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
    size_t binind = bin - arena->bins;
    assert(binind < NBINS);
    return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
    unsigned shift, diff, regind;
    size_t interval;

    /*
     * Freeing a pointer lower than region zero can cause assertion
     * failure.
     */
    assert((uintptr_t)ptr >= (uintptr_t)run +
        (uintptr_t)bin_info->reg0_offset);

    /*
     * Avoid doing division with a variable divisor if possible. Using
     * actual division here can reduce allocator throughput by over 20%!
     */
    diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
        bin_info->reg0_offset);

    /* Rescale (factor powers of 2 out of the numerator and denominator). */
    interval = bin_info->reg_interval;
    shift = ffs(interval) - 1;
    diff >>= shift;
    interval >>= shift;

    if (interval == 1) {
        /* The divisor was a power of 2. */
        regind = diff;
    } else {
        /*
         * To divide by a number D that is not a power of two we
         * multiply by (2^21 / D) and then right shift by 21 positions.
         *
         *   X / D
         *
         * becomes
         *
         *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
         *
         * We can omit the first three elements, because we never
         * divide by 0, and 1 and 2 are both powers of two, which are
         * handled above.
         */
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
        static const unsigned interval_invs[] = {
            SIZE_INV(3),
            SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
            SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
            SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
            SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
            SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
            SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
            SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
        };

        if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
            2)) {
            regind = (diff * interval_invs[interval - 3]) >>
                SIZE_INV_SHIFT;
        } else
            regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
    }
    assert(diff == regind * interval);
    assert(regind < bin_info->nregs);

    return (regind);
}
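
/*
 * Worked example for arena_run_regind() (editor's illustration): with 32-bit
 * unsigned and LG_RUN_MAXREGS == 11, SIZE_INV_SHIFT == 21. For
 * reg_interval == 48 and diff == 96: ffs(48) - 1 == 4, so diff becomes 6 and
 * interval becomes 3; SIZE_INV(3) == ((1U << 21) / 3) + 1 == 699051, and
 * (6 * 699051) >> 21 == 2 == 96 / 48. The "+ 1" in SIZE_INV rounds the
 * reciprocal up so that truncated multiplication never under-counts.
 */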

JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret;
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    cassert(config_prof);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        if (prof_promote)
            ret = (prof_ctx_t *)(uintptr_t)1U;
        else {
            arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
                LG_PAGE));
            size_t binind = arena_ptr_small_binind_get(ptr,
                mapbits);
            arena_bin_info_t *bin_info = &arena_bin_info[binind];
            unsigned regind;

            regind = arena_run_regind(run, bin_info, ptr);
            ret = *(prof_ctx_t **)((uintptr_t)run +
                bin_info->ctx0_offset + (regind *
                sizeof(prof_ctx_t *)));
        }
    } else
        ret = arena_mapp_get(chunk, pageind)->prof_ctx;

    return (ret);
}

JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
    arena_chunk_t *chunk;
    size_t pageind, mapbits;

    cassert(config_prof);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    mapbits = arena_mapbits_get(chunk, pageind);
    assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        if (prof_promote == false) {
            arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
                (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
                LG_PAGE));
            size_t binind;
            arena_bin_info_t *bin_info;
            unsigned regind;

            binind = arena_ptr_small_binind_get(ptr, mapbits);
            bin_info = &arena_bin_info[binind];
            regind = arena_run_regind(run, bin_info, ptr);

            *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
                + (regind * sizeof(prof_ctx_t *)))) = ctx;
        } else
            assert((uintptr_t)ctx == (uintptr_t)1U);
    } else
        arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}
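
/*
 * Editor's note: when prof_promote is true, sampled small allocations are
 * serviced from page-sized large runs (see arena_prof_promoted()), so the
 * per-region ctx slots go unused; the sentinel (prof_ctx_t *)1U above stands
 * in for "not sampled".
 */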

JEMALLOC_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
    tcache_t *tcache;

    assert(size != 0);
    assert(size <= arena_maxclass);

    if (size <= SMALL_MAXCLASS) {
        if (try_tcache && (tcache = tcache_get(true)) != NULL)
            return (tcache_alloc_small(tcache, size, zero));
        else {
            return (arena_malloc_small(choose_arena(arena), size,
                zero));
        }
    } else {
        /*
         * Initialize tcache after checking size in order to avoid
         * infinite recursion during tcache initialization.
         */
        if (try_tcache && size <= tcache_maxclass && (tcache =
            tcache_get(true)) != NULL)
            return (tcache_alloc_large(tcache, size, zero));
        else {
            return (arena_malloc_large(choose_arena(arena), size,
                zero));
        }
    }
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;
    size_t pageind, binind;

    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
    binind = arena_mapbits_binind_get(chunk, pageind);
    if (binind == BININD_INVALID || (config_prof && demote == false &&
        prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
        /*
         * Large allocation. In the common case (demote == true), and
         * as this is an inline function, most callers will only end up
         * looking at binind to determine that ptr is a small
         * allocation.
         */
        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
        ret = arena_mapbits_large_size_get(chunk, pageind);
        assert(ret != 0);
        assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
        assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
            pageind+(ret>>LG_PAGE)-1) == 0);
        assert(binind == arena_mapbits_binind_get(chunk,
            pageind+(ret>>LG_PAGE)-1));
        assert(arena_mapbits_dirty_get(chunk, pageind) ==
            arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
    } else {
        /*
         * Small allocation (possibly promoted to a large object due to
         * prof_promote).
         */
        assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
            arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
            pageind)) == binind);
        ret = arena_bin_info[binind].reg_size;
    }

    return (ret);
}
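
/*
 * Editor's note on the demote flag: for a sampled small allocation that
 * prof_promote backed with a page-sized large run, arena_salloc(ptr, true)
 * reports the small size class's reg_size, while arena_salloc(ptr, false)
 * reports the promoted PAGE-sized large size. For ordinary allocations the
 * two agree.
 */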

JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
    size_t pageind, mapbits;
    tcache_t *tcache;

    assert(arena != NULL);
    assert(chunk->arena == arena);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    mapbits = arena_mapbits_get(chunk, pageind);
    assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
        /* Small allocation. */
        if (try_tcache && (tcache = tcache_get(false)) != NULL) {
            size_t binind;

            binind = arena_ptr_small_binind_get(ptr, mapbits);
            tcache_dalloc_small(tcache, ptr, binind);
        } else
            arena_dalloc_small(arena, chunk, ptr, pageind);
    } else {
        size_t size = arena_mapbits_large_size_get(chunk, pageind);

        assert(((uintptr_t)ptr & PAGE_MASK) == 0);

        if (try_tcache && size <= tcache_maxclass && (tcache =
            tcache_get(false)) != NULL) {
            tcache_dalloc_large(tcache, ptr, size);
        } else
            arena_dalloc_large(arena, chunk, ptr);
    }
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/