#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

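/*
 * Fixed-point (SMOOTHSTEP_BFP bits) smoothstep coefficients, one per decay
 * epoch step; arena_decay_backlog_npages_limit() weights the decay backlog by
 * these to compute the current page limit.
 */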
const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)			\
		h,
		SMOOTHSTEP
#undef STEP
};

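/*
 * Per-bin division state, used by arena_slab_regind() (via div_compute()) to
 * map a region's byte offset within its slab to a region index without
 * dividing by a variable divisor.
 */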
static div_info_t arena_binind_div_info[NBINS];

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(NBINS + i));
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < NBINS; i++) {
		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

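/*
 * Compute the index of the region of slab (with size class binind) that ptr
 * refers to; non-static only when JEMALLOC_JET is defined.
 */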
#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
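	/* I.e. limit = (sum over i of backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP. */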
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

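/*
 * Slide the backlog window forward by nadvance_u64 epochs, zero-filling
 * entries for any skipped epochs, then record the newest element via
 * arena_decay_backlog_update_last().
 */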
static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

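/*
 * Check whether the decay deadline has been reached and, if so, advance the
 * decay epoch and purge toward the new page limit.  Returns true iff the
 * epoch advanced.  The caller must hold decay->mtx.
 */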
static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
#ifndef __NetBSD__
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
#endif
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances, or when
	 * triggered by a background thread (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	UNUSED size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Decay at most npages_decay_max pages without violating the
 * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
 * bound on the number of pages in order to prevent unbounded growth (namely in
 * stashed); otherwise unbounded new pages could be added to extents during the
 * current decay run, so that the purging thread never finishes.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	UNUSED size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	} else {
		/* XXX: Unused in this case; set to quiet gcc without -O. */
		npages_new = 0;
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 *  Tracking extents is required by arena_reset, which is not allowed
	 *  for auto arenas.  Bypass this step to avoid touching the extent
	 *  linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
		bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents is
	 * leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from the
	 * arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so as
	 * long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped by
	 * this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	const bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
}

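/*
 * Fill tbin with up to ncached_max >> lg_fill_div[binind] regions from the
 * arena's bin, placing them such that the lowest regions are allocated first.
 */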
void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

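/*
 * Note: this is presumably called for a sampled small allocation that is
 * backed by a LARGE_MINCLASS extent; it records usize's size class in the
 * extent and rtree so that subsequent isalloc() calls report usize.
 */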
void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_szind_set(extent, NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    NBINS, false);

	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);

	return LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(usize), slow_path);
	} else {
		large_dalloc(tsdn, extent);
	}
}

static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

1703ssize_t
1704arena_muzzy_decay_ms_default_get(void) {
1705	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
1706}
1707
1708bool
1709arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
1710	if (!arena_decay_ms_valid(decay_ms)) {
1711		return true;
1712	}
1713	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
1714	return false;
1715}
1716
1717bool
1718arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
1719    size_t *new_limit) {
1720	assert(opt_retain);
1721
1722	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
1723	if (new_limit != NULL) {
1724		size_t limit = *new_limit;
		/*
		 * Grow no more than the new limit: sz_psz2ind(limit + 1) - 1
		 * is the largest page size class index whose size does not
		 * exceed limit.
		 */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		     EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

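/* Monotonically increasing serial numbers, assigned to extents at creation. */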
size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are in
	 * the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

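	/*
	 * Retained extent growth starts at one hugepage; retain_grow_limit
	 * bounds how large later growth steps may get (adjustable via
	 * arena_retain_grow_limit_get_set()).
	 */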
	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
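	/* Arena 0 borrows the global base (b0get()); never delete it here. */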
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
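	/*
	 * Precompute per-bin division state so that mapping a pointer within a
	 * slab back to its region index can use precomputed multiply/shift
	 * arithmetic instead of dividing by the region size.
	 */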
#define REGIND_bin_yes(index, reg_size)					\
	div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}

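/*
 * The numbered prefork hooks run in order before fork(); between them they
 * acquire all of the arena's mutexes so that arena state cannot change while
 * the process forks.  The postfork_parent/postfork_child hooks below undo
 * this in the reverse order.
 */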
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

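	/*
	 * Only the thread that called fork() exists in the child, so the
	 * thread counts and the per-arena tcache list must be rebuilt from
	 * scratch here.
	 */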
	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}