#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail heap. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail heaps. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}
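
/*
 * Worked example (an illustration, assuming 4 KiB pages, the default size
 * classes, and no cache index randomization, i.e. large_pad == 0): a 9-page
 * (36 KiB) input quantizes down to 32 KiB, the largest size class a normal
 * large allocation could have requested without exceeding 36 KiB -- unless
 * 36 KiB happens to be a valid small run size, in which case it is returned
 * unchanged by the small_run_tab check above.
 */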

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}
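
/*
 * Continuing the illustration above (same assumptions):
 * run_quantize_ceil_compute() below maps the same 36 KiB input up to 40 KiB,
 * the next large size class, provided no valid small run size intervenes
 * between 36 KiB and 40 KiB.
 */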

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

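/*
 * In JEMALLOC_JET (testing) builds, the #define/#undef dance below renames
 * each static implementation and re-exposes the canonical name as a function
 * pointer, so that tests can interpose their own versions.
 */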
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

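/*
 * Available runs are tracked in per-size-class pairing heaps, keyed by the
 * quantized (floored) run size; the insert/remove helpers below keep the
 * heaps in sync with the chunk map.
 */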
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

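/*
 * Dirty runs are additionally linked into a ring (arena->runs_dirty) that
 * purging iterates; arena->ndirty tracks the arena's total dirty page count.
 */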
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

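	/*
	 * bitmap_sfu() finds, sets, and returns the index of the first unset
	 * bit, i.e. the lowest-addressed free region in the run.
	 */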
	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

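/*
 * stats_cactive tracks active memory at chunk granularity, so the helpers
 * below only adjust it when a change in nactive crosses a CHUNK_CEILING()
 * boundary.
 */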
static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

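/*
 * Chunk allocation first tries the arena's cache of previously freed chunks;
 * only on a cache miss does it fall back to
 * arena_chunk_alloc_internal_hard(), which drops arena->lock while mapping
 * new memory.
 */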
static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

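/*
 * Huge allocation stats are kept per size class; the index arithmetic below
 * maps a usable size to its slot in stats.hstats[], whose first element
 * corresponds to the first huge class (past the NBINS small classes and
 * nlclasses large classes).
 */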
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

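/*
 * Stats are updated optimistically, while arena->lock is still held, so that
 * the lock need not be reacquired on the common (successful) path; on
 * failure the updates are reverted via the *_undo() helpers above.
 */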
void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

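/*
 * Decay-based purging (opt_purge == purge_mode_decay): unused dirty pages
 * decay according to a smoothstep curve over decay_time.  Time is divided
 * into SMOOTHSTEP_NSTEPS epochs, and the per-epoch backlog of dirty pages
 * determines how many dirty pages may be retained at any given moment.
 */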
static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
1288296221Sjasone	uint64_t sum;
1289296221Sjasone	size_t npages_limit_backlog;
1290296221Sjasone	unsigned i;
1291296221Sjasone
1292296221Sjasone	assert(opt_purge == purge_mode_decay);
1293296221Sjasone
	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then shift
	 * right by SMOOTHSTEP_BFP bits (a division by 2^SMOOTHSTEP_BFP) to
	 * round down to the nearest whole number of pages.
	 */
1299296221Sjasone	sum = 0;
1300296221Sjasone	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1301296221Sjasone		sum += arena->decay_backlog[i] * h_steps[i];
1302299587Sjasone	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
1303296221Sjasone
1304296221Sjasone	return (npages_limit_backlog);
1305296221Sjasone}
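
/*
 * Illustrative arithmetic (values assumed): each h_steps[i] is a
 * fixed-point factor with SMOOTHSTEP_BFP fractional bits, increasing from
 * ~0 for the oldest backlog slot to ~2^SMOOTHSTEP_BFP for the newest.  So
 * 1000 pages dirtied in the most recent epoch contribute ~1000 pages to
 * the limit, whereas the same 1000 pages at the old end of the backlog
 * contribute ~0, meaning they have fully decayed and may be purged.
 */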
1306296221Sjasone
1307296221Sjasonestatic void
1308296221Sjasonearena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
1309296221Sjasone{
1310299587Sjasone	uint64_t nadvance_u64;
1311296221Sjasone	nstime_t delta;
1312296221Sjasone	size_t ndirty_delta;
1313296221Sjasone
1314296221Sjasone	assert(opt_purge == purge_mode_decay);
1315296221Sjasone	assert(arena_decay_deadline_reached(arena, time));
1316296221Sjasone
1317296221Sjasone	nstime_copy(&delta, time);
1318296221Sjasone	nstime_subtract(&delta, &arena->decay_epoch);
1319299587Sjasone	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
1320299587Sjasone	assert(nadvance_u64 > 0);
1321296221Sjasone
1322299587Sjasone	/* Add nadvance_u64 decay intervals to epoch. */
1323296221Sjasone	nstime_copy(&delta, &arena->decay_interval);
1324299587Sjasone	nstime_imultiply(&delta, nadvance_u64);
1325296221Sjasone	nstime_add(&arena->decay_epoch, &delta);
1326296221Sjasone
1327296221Sjasone	/* Set a new deadline. */
1328296221Sjasone	arena_decay_deadline_init(arena);
1329296221Sjasone
1330296221Sjasone	/* Update the backlog. */
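	/*
	 * In both branches below, the newest backlog slot is deliberately
	 * left stale: it is unconditionally overwritten with ndirty_delta
	 * afterward, so only the other SMOOTHSTEP_NSTEPS-1 slots need to be
	 * cleared or shifted.
	 */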
1331299587Sjasone	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1332296221Sjasone		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1333296221Sjasone		    sizeof(size_t));
1334296221Sjasone	} else {
1335299587Sjasone		size_t nadvance_z = (size_t)nadvance_u64;
1336299587Sjasone
1337299587Sjasone		assert((uint64_t)nadvance_z == nadvance_u64);
1338299587Sjasone
1339299587Sjasone		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
1340299587Sjasone		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1341299587Sjasone		if (nadvance_z > 1) {
1342296221Sjasone			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
1343299587Sjasone			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1344296221Sjasone		}
1345296221Sjasone	}
1346296221Sjasone	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
1347296221Sjasone	    arena->decay_ndirty : 0;
1348296221Sjasone	arena->decay_ndirty = arena->ndirty;
1349296221Sjasone	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1350296221Sjasone	arena->decay_backlog_npages_limit =
1351296221Sjasone	    arena_decay_backlog_npages_limit(arena);
1352296221Sjasone}
1353296221Sjasone
1354296221Sjasonestatic size_t
1355296221Sjasonearena_decay_npages_limit(arena_t *arena)
1356296221Sjasone{
1357296221Sjasone	size_t npages_limit;
1358296221Sjasone
1359296221Sjasone	assert(opt_purge == purge_mode_decay);
1360296221Sjasone
1361296221Sjasone	npages_limit = arena->decay_backlog_npages_limit;
1362296221Sjasone
1363296221Sjasone	/* Add in any dirty pages created during the current epoch. */
1364296221Sjasone	if (arena->ndirty > arena->decay_ndirty)
1365296221Sjasone		npages_limit += arena->ndirty - arena->decay_ndirty;
1366296221Sjasone
1367296221Sjasone	return (npages_limit);
1368296221Sjasone}
1369296221Sjasone
1370296221Sjasonestatic void
1371296221Sjasonearena_decay_init(arena_t *arena, ssize_t decay_time)
1372296221Sjasone{
1373296221Sjasone
1374296221Sjasone	arena->decay_time = decay_time;
1375296221Sjasone	if (decay_time > 0) {
1376296221Sjasone		nstime_init2(&arena->decay_interval, decay_time, 0);
1377296221Sjasone		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
1378296221Sjasone	}
1379296221Sjasone
1380296221Sjasone	nstime_init(&arena->decay_epoch, 0);
1381296221Sjasone	nstime_update(&arena->decay_epoch);
1382296221Sjasone	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
1383296221Sjasone	arena_decay_deadline_init(arena);
1384296221Sjasone	arena->decay_ndirty = arena->ndirty;
1385296221Sjasone	arena->decay_backlog_npages_limit = 0;
1386296221Sjasone	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1387296221Sjasone}
1388296221Sjasone
1389296221Sjasonestatic bool
1390296221Sjasonearena_decay_time_valid(ssize_t decay_time)
1391296221Sjasone{
1392296221Sjasone
1393296334Sjasone	if (decay_time < -1)
1394296334Sjasone		return (false);
1395296334Sjasone	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1396296334Sjasone		return (true);
1397296334Sjasone	return (false);
1398296221Sjasone}
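
/*
 * For reference: decay_time == -1 disables decay-based purging,
 * decay_time == 0 purges all dirty pages immediately, and positive values
 * up to NSTIME_SEC_MAX specify the decay time in seconds.
 */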
1399296221Sjasone
1400296221Sjasonessize_t
1401299587Sjasonearena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
1402296221Sjasone{
1403296221Sjasone	ssize_t decay_time;
1404296221Sjasone
1405299587Sjasone	malloc_mutex_lock(tsdn, &arena->lock);
1406296221Sjasone	decay_time = arena->decay_time;
1407299587Sjasone	malloc_mutex_unlock(tsdn, &arena->lock);
1408296221Sjasone
1409296221Sjasone	return (decay_time);
1410296221Sjasone}
1411296221Sjasone
1412296221Sjasonebool
1413299587Sjasonearena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
1414296221Sjasone{
1415296221Sjasone
1416296221Sjasone	if (!arena_decay_time_valid(decay_time))
1417296221Sjasone		return (true);
1418296221Sjasone
1419299587Sjasone	malloc_mutex_lock(tsdn, &arena->lock);
1420296221Sjasone	/*
1421296221Sjasone	 * Restart decay backlog from scratch, which may cause many dirty pages
1422296221Sjasone	 * to be immediately purged.  It would conceptually be possible to map
1423296221Sjasone	 * the old backlog onto the new backlog, but there is no justification
1424296221Sjasone	 * for such complexity since decay_time changes are intended to be
1425296221Sjasone	 * infrequent, either between the {-1, 0, >0} states, or a one-time
1426296221Sjasone	 * arbitrary change during initial arena configuration.
1427296221Sjasone	 */
1428296221Sjasone	arena_decay_init(arena, decay_time);
1429299587Sjasone	arena_maybe_purge(tsdn, arena);
1430299587Sjasone	malloc_mutex_unlock(tsdn, &arena->lock);
1431296221Sjasone
1432296221Sjasone	return (false);
1433296221Sjasone}
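
/*
 * Sketch of a typical entry point (the mallctl name is assumed here, not
 * defined in this file):
 *
 *	ssize_t decay_time = 30;
 *	mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
 *	    sizeof(decay_time));
 */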
1434296221Sjasone
1435296221Sjasonestatic void
1436299587Sjasonearena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
1437296221Sjasone{
1438296221Sjasone
1439296221Sjasone	assert(opt_purge == purge_mode_ratio);
1440296221Sjasone
1441242844Sjasone	/* Don't purge if the option is disabled. */
1442286866Sjasone	if (arena->lg_dirty_mult < 0)
1443242844Sjasone		return;
1444296221Sjasone
1445242844Sjasone	/*
1446286866Sjasone	 * Iterate, since preventing recursive purging could otherwise leave too
1447286866Sjasone	 * many dirty pages.
1448242844Sjasone	 */
1449286866Sjasone	while (true) {
1450286866Sjasone		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1451286866Sjasone		if (threshold < chunk_npages)
1452286866Sjasone			threshold = chunk_npages;
1453286866Sjasone		/*
1454286866Sjasone		 * Don't purge unless the number of purgeable pages exceeds the
1455286866Sjasone		 * threshold.
1456286866Sjasone		 */
1457286866Sjasone		if (arena->ndirty <= threshold)
1458286866Sjasone			return;
1459299587Sjasone		arena_purge_to_limit(tsdn, arena, threshold);
1460286866Sjasone	}
1461234370Sjasone}
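
/*
 * Worked example (hypothetical numbers): with lg_dirty_mult == 3 and
 * nactive == 80000 pages, the threshold is 10000 pages (or chunk_npages,
 * whichever is larger), so purging begins only once more than 10000 dirty
 * pages have accumulated, and iterates until the ratio invariant holds.
 */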
1462234370Sjasone
1463296221Sjasonestatic void
1464299587Sjasonearena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
1465296221Sjasone{
1466296221Sjasone	nstime_t time;
1467296221Sjasone	size_t ndirty_limit;
1468296221Sjasone
1469296221Sjasone	assert(opt_purge == purge_mode_decay);
1470296221Sjasone
	/*
	 * If decay-based purging is effectively disabled (decay_time <= 0),
	 * either purge everything immediately (0) or purge nothing (-1).
	 */
1472296221Sjasone	if (arena->decay_time <= 0) {
1473296221Sjasone		if (arena->decay_time == 0)
1474299587Sjasone			arena_purge_to_limit(tsdn, arena, 0);
1475296221Sjasone		return;
1476296221Sjasone	}
1477296221Sjasone
1478296221Sjasone	nstime_copy(&time, &arena->decay_epoch);
1479296221Sjasone	if (unlikely(nstime_update(&time))) {
1480296221Sjasone		/* Time went backwards.  Force an epoch advance. */
1481296221Sjasone		nstime_copy(&time, &arena->decay_deadline);
1482296221Sjasone	}
1483296221Sjasone
1484296221Sjasone	if (arena_decay_deadline_reached(arena, &time))
1485296221Sjasone		arena_decay_epoch_advance(arena, &time);
1486296221Sjasone
1487296221Sjasone	ndirty_limit = arena_decay_npages_limit(arena);
1488296221Sjasone
1489296221Sjasone	/*
1490296221Sjasone	 * Don't try to purge unless the number of purgeable pages exceeds the
1491296221Sjasone	 * current limit.
1492296221Sjasone	 */
1493296221Sjasone	if (arena->ndirty <= ndirty_limit)
1494296221Sjasone		return;
1495299587Sjasone	arena_purge_to_limit(tsdn, arena, ndirty_limit);
1496296221Sjasone}
1497296221Sjasone
1498296221Sjasonevoid
1499299587Sjasonearena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
1500296221Sjasone{
1501296221Sjasone
1502296221Sjasone	/* Don't recursively purge. */
1503296221Sjasone	if (arena->purging)
1504296221Sjasone		return;
1505296221Sjasone
1506296221Sjasone	if (opt_purge == purge_mode_ratio)
1507299587Sjasone		arena_maybe_purge_ratio(tsdn, arena);
1508296221Sjasone	else
1509299587Sjasone		arena_maybe_purge_decay(tsdn, arena);
1510296221Sjasone}
1511296221Sjasone
1512286866Sjasonestatic size_t
1513286866Sjasonearena_dirty_count(arena_t *arena)
1514234370Sjasone{
1515286866Sjasone	size_t ndirty = 0;
1516286866Sjasone	arena_runs_dirty_link_t *rdelm;
1517286866Sjasone	extent_node_t *chunkselm;
1518234370Sjasone
1519286866Sjasone	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1520286866Sjasone	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1521286866Sjasone	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1522286866Sjasone		size_t npages;
1523286866Sjasone
1524286866Sjasone		if (rdelm == &chunkselm->rd) {
1525286866Sjasone			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1526286866Sjasone			chunkselm = qr_next(chunkselm, cc_link);
1527286866Sjasone		} else {
1528286866Sjasone			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1529286866Sjasone			    rdelm);
1530286866Sjasone			arena_chunk_map_misc_t *miscelm =
1531286866Sjasone			    arena_rd_to_miscelm(rdelm);
1532286866Sjasone			size_t pageind = arena_miscelm_to_pageind(miscelm);
1533286866Sjasone			assert(arena_mapbits_allocated_get(chunk, pageind) ==
1534286866Sjasone			    0);
1535286866Sjasone			assert(arena_mapbits_large_get(chunk, pageind) == 0);
1536286866Sjasone			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1537286866Sjasone			npages = arena_mapbits_unallocated_size_get(chunk,
1538286866Sjasone			    pageind) >> LG_PAGE;
1539286866Sjasone		}
1540286866Sjasone		ndirty += npages;
1541286866Sjasone	}
1542286866Sjasone
1543286866Sjasone	return (ndirty);
1544261071Sjasone}
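
/*
 * The loop above walks two rings in lockstep: runs_dirty links both dirty
 * runs and the rd element embedded in each cached chunk's extent_node_t,
 * whereas chunks_cache links only the chunk nodes.  The comparison
 * rdelm == &chunkselm->rd therefore detects when the current dirty
 * element is a cached chunk rather than a run.
 */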
1545234370Sjasone
1546261071Sjasonestatic size_t
1547299587Sjasonearena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1548296221Sjasone    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
1549286866Sjasone    extent_node_t *purge_chunks_sentinel)
1550261071Sjasone{
1551286866Sjasone	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1552286866Sjasone	extent_node_t *chunkselm;
1553286866Sjasone	size_t nstashed = 0;
1554261071Sjasone
1555296221Sjasone	/* Stash runs/chunks according to ndirty_limit. */
1556286866Sjasone	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1557286866Sjasone	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1558286866Sjasone	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1559286866Sjasone		size_t npages;
1560286866Sjasone		rdelm_next = qr_next(rdelm, rd_link);
1561286866Sjasone
1562286866Sjasone		if (rdelm == &chunkselm->rd) {
1563286866Sjasone			extent_node_t *chunkselm_next;
1564286866Sjasone			bool zero;
1565286866Sjasone			UNUSED void *chunk;
1566286866Sjasone
1567296221Sjasone			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1568296221Sjasone			if (opt_purge == purge_mode_decay && arena->ndirty -
1569296221Sjasone			    (nstashed + npages) < ndirty_limit)
1570296221Sjasone				break;
1571296221Sjasone
1572286866Sjasone			chunkselm_next = qr_next(chunkselm, cc_link);
1573286866Sjasone			/*
1574286866Sjasone			 * Allocate.  chunkselm remains valid due to the
1575286866Sjasone			 * dalloc_node=false argument to chunk_alloc_cache().
1576286866Sjasone			 */
1577286866Sjasone			zero = false;
1578299587Sjasone			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
1579286866Sjasone			    extent_node_addr_get(chunkselm),
1580286866Sjasone			    extent_node_size_get(chunkselm), chunksize, &zero,
1581286866Sjasone			    false);
1582286866Sjasone			assert(chunk == extent_node_addr_get(chunkselm));
1583286866Sjasone			assert(zero == extent_node_zeroed_get(chunkselm));
1584286866Sjasone			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
1585286866Sjasone			    purge_chunks_sentinel);
1586296221Sjasone			assert(npages == (extent_node_size_get(chunkselm) >>
1587296221Sjasone			    LG_PAGE));
1588286866Sjasone			chunkselm = chunkselm_next;
1589286866Sjasone		} else {
1590286866Sjasone			arena_chunk_t *chunk =
1591286866Sjasone			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1592286866Sjasone			arena_chunk_map_misc_t *miscelm =
1593286866Sjasone			    arena_rd_to_miscelm(rdelm);
1594286866Sjasone			size_t pageind = arena_miscelm_to_pageind(miscelm);
1595286866Sjasone			arena_run_t *run = &miscelm->run;
1596242844Sjasone			size_t run_size =
1597242844Sjasone			    arena_mapbits_unallocated_size_get(chunk, pageind);
1598234370Sjasone
1599242844Sjasone			npages = run_size >> LG_PAGE;
1600296221Sjasone			if (opt_purge == purge_mode_decay && arena->ndirty -
1601296221Sjasone			    (nstashed + npages) < ndirty_limit)
1602296221Sjasone				break;
1603286866Sjasone
1604234370Sjasone			assert(pageind + npages <= chunk_npages);
1605235322Sjasone			assert(arena_mapbits_dirty_get(chunk, pageind) ==
1606235322Sjasone			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
1607234370Sjasone
			/*
			 * If purging the spare chunk's run, promote the spare
			 * back to a regular chunk so that its run can be
			 * allocated below.
			 */
1612286866Sjasone			if (chunk == arena->spare)
1613299587Sjasone				arena_chunk_alloc(tsdn, arena);
1614234370Sjasone
1615286866Sjasone			/* Temporarily allocate the free dirty run. */
1616286866Sjasone			arena_run_split_large(arena, run, run_size, false);
			/*
			 * Stash.  arena_run_split_large() above removed rdelm
			 * from the dirty ring, leaving it a singleton, so a
			 * qr_new() here would be redundant; assert that
			 * instead.
			 */
			assert(qr_next(rdelm, rd_link) == rdelm);
			assert(qr_prev(rdelm, rd_link) == rdelm);
1624286866Sjasone			qr_meld(purge_runs_sentinel, rdelm, rd_link);
1625286866Sjasone		}
1626234370Sjasone
1627286866Sjasone		nstashed += npages;
1628296221Sjasone		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1629296221Sjasone		    ndirty_limit)
1630286866Sjasone			break;
1631234370Sjasone	}
1632286866Sjasone
1633286866Sjasone	return (nstashed);
1634261071Sjasone}
1635234370Sjasone
1636261071Sjasonestatic size_t
1637299587Sjasonearena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1638286866Sjasone    arena_runs_dirty_link_t *purge_runs_sentinel,
1639286866Sjasone    extent_node_t *purge_chunks_sentinel)
1640261071Sjasone{
1641286866Sjasone	size_t npurged, nmadvise;
1642286866Sjasone	arena_runs_dirty_link_t *rdelm;
1643286866Sjasone	extent_node_t *chunkselm;
1644261071Sjasone
1645234370Sjasone	if (config_stats)
1646234370Sjasone		nmadvise = 0;
1647242844Sjasone	npurged = 0;
1648242844Sjasone
1649299587Sjasone	malloc_mutex_unlock(tsdn, &arena->lock);
1650286866Sjasone	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1651286866Sjasone	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1652286866Sjasone	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1653286866Sjasone		size_t npages;
1654286866Sjasone
1655286866Sjasone		if (rdelm == &chunkselm->rd) {
1656286866Sjasone			/*
1657286866Sjasone			 * Don't actually purge the chunk here because 1)
1658286866Sjasone			 * chunkselm is embedded in the chunk and must remain
1659286866Sjasone			 * valid, and 2) we deallocate the chunk in
1660286866Sjasone			 * arena_unstash_purged(), where it is destroyed,
1661286866Sjasone			 * decommitted, or purged, depending on chunk
1662286866Sjasone			 * deallocation policy.
1663286866Sjasone			 */
1664286866Sjasone			size_t size = extent_node_size_get(chunkselm);
1665286866Sjasone			npages = size >> LG_PAGE;
1666286866Sjasone			chunkselm = qr_next(chunkselm, cc_link);
1667286866Sjasone		} else {
1668286866Sjasone			size_t pageind, run_size, flag_unzeroed, flags, i;
1669286866Sjasone			bool decommitted;
1670286866Sjasone			arena_chunk_t *chunk =
1671286866Sjasone			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1672286866Sjasone			arena_chunk_map_misc_t *miscelm =
1673286866Sjasone			    arena_rd_to_miscelm(rdelm);
1674286866Sjasone			pageind = arena_miscelm_to_pageind(miscelm);
1675286866Sjasone			run_size = arena_mapbits_large_size_get(chunk, pageind);
1676286866Sjasone			npages = run_size >> LG_PAGE;
1677286866Sjasone
1678286866Sjasone			assert(pageind + npages <= chunk_npages);
1679286866Sjasone			assert(!arena_mapbits_decommitted_get(chunk, pageind));
1680286866Sjasone			assert(!arena_mapbits_decommitted_get(chunk,
1681286866Sjasone			    pageind+npages-1));
1682286866Sjasone			decommitted = !chunk_hooks->decommit(chunk, chunksize,
1683286866Sjasone			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1684286866Sjasone			if (decommitted) {
1685286866Sjasone				flag_unzeroed = 0;
1686286866Sjasone				flags = CHUNK_MAP_DECOMMITTED;
1687286866Sjasone			} else {
1688299587Sjasone				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
1689286866Sjasone				    chunk_hooks, chunk, chunksize, pageind <<
1690286866Sjasone				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1691286866Sjasone				flags = flag_unzeroed;
1692286866Sjasone			}
1693286866Sjasone			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1694286866Sjasone			    flags);
1695286866Sjasone			arena_mapbits_large_set(chunk, pageind, run_size,
1696286866Sjasone			    flags);
1697286866Sjasone
1698286866Sjasone			/*
1699286866Sjasone			 * Set the unzeroed flag for internal pages, now that
1700286866Sjasone			 * chunk_purge_wrapper() has returned whether the pages
1701286866Sjasone			 * were zeroed as a side effect of purging.  This chunk
1702286866Sjasone			 * map modification is safe even though the arena mutex
1703286866Sjasone			 * isn't currently owned by this thread, because the run
1704286866Sjasone			 * is marked as allocated, thus protecting it from being
1705286866Sjasone			 * modified by any other thread.  As long as these
1706286866Sjasone			 * writes don't perturb the first and last elements'
1707286866Sjasone			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1708286866Sjasone			 */
1709286866Sjasone			for (i = 1; i < npages-1; i++) {
1710286866Sjasone				arena_mapbits_internal_set(chunk, pageind+i,
1711286866Sjasone				    flag_unzeroed);
1712286866Sjasone			}
1713242844Sjasone		}
1714286866Sjasone
1715242844Sjasone		npurged += npages;
1716234370Sjasone		if (config_stats)
1717234370Sjasone			nmadvise++;
1718234370Sjasone	}
1719299587Sjasone	malloc_mutex_lock(tsdn, &arena->lock);
1720286866Sjasone
1721286866Sjasone	if (config_stats) {
1722234370Sjasone		arena->stats.nmadvise += nmadvise;
1723286866Sjasone		arena->stats.purged += npurged;
1724286866Sjasone	}
1725234370Sjasone
1726261071Sjasone	return (npurged);
1727261071Sjasone}
1728261071Sjasone
1729261071Sjasonestatic void
1730299587Sjasonearena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1731286866Sjasone    arena_runs_dirty_link_t *purge_runs_sentinel,
1732286866Sjasone    extent_node_t *purge_chunks_sentinel)
1733261071Sjasone{
1734286866Sjasone	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1735286866Sjasone	extent_node_t *chunkselm;
1736261071Sjasone
1737286866Sjasone	/* Deallocate chunks/runs. */
1738286866Sjasone	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1739286866Sjasone	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1740286866Sjasone	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1741286866Sjasone		rdelm_next = qr_next(rdelm, rd_link);
1742286866Sjasone		if (rdelm == &chunkselm->rd) {
1743286866Sjasone			extent_node_t *chunkselm_next = qr_next(chunkselm,
1744286866Sjasone			    cc_link);
1745286866Sjasone			void *addr = extent_node_addr_get(chunkselm);
1746286866Sjasone			size_t size = extent_node_size_get(chunkselm);
1747286866Sjasone			bool zeroed = extent_node_zeroed_get(chunkselm);
1748286866Sjasone			bool committed = extent_node_committed_get(chunkselm);
1749286866Sjasone			extent_node_dirty_remove(chunkselm);
1750299587Sjasone			arena_node_dalloc(tsdn, arena, chunkselm);
1751286866Sjasone			chunkselm = chunkselm_next;
1752299587Sjasone			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
1753299587Sjasone			    size, zeroed, committed);
1754286866Sjasone		} else {
1755286866Sjasone			arena_chunk_t *chunk =
1756286866Sjasone			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1757286866Sjasone			arena_chunk_map_misc_t *miscelm =
1758286866Sjasone			    arena_rd_to_miscelm(rdelm);
1759286866Sjasone			size_t pageind = arena_miscelm_to_pageind(miscelm);
1760286866Sjasone			bool decommitted = (arena_mapbits_decommitted_get(chunk,
1761286866Sjasone			    pageind) != 0);
1762286866Sjasone			arena_run_t *run = &miscelm->run;
1763286866Sjasone			qr_remove(rdelm, rd_link);
1764299587Sjasone			arena_run_dalloc(tsdn, arena, run, false, true,
1765299587Sjasone			    decommitted);
1766286866Sjasone		}
1767234370Sjasone	}
1768234370Sjasone}
1769234370Sjasone
1770296221Sjasone/*
1771296221Sjasone * NB: ndirty_limit is interpreted differently depending on opt_purge:
1772296221Sjasone *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
1773296221Sjasone *                       desired state:
1774296221Sjasone *                       (arena->ndirty <= ndirty_limit)
1775296221Sjasone *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1776296221Sjasone *                       violating the invariant:
1777296221Sjasone *                       (arena->ndirty >= ndirty_limit)
1778296221Sjasone */
1779286866Sjasonestatic void
1780299587Sjasonearena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
1781242844Sjasone{
1782299587Sjasone	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1783296221Sjasone	size_t npurge, npurged;
1784286866Sjasone	arena_runs_dirty_link_t purge_runs_sentinel;
1785286866Sjasone	extent_node_t purge_chunks_sentinel;
1786242844Sjasone
1787286866Sjasone	arena->purging = true;
1788261071Sjasone
1789261071Sjasone	/*
1790286866Sjasone	 * Calls to arena_dirty_count() are disabled even for debug builds
1791286866Sjasone	 * because overhead grows nonlinearly as memory usage increases.
1792261071Sjasone	 */
1793286866Sjasone	if (false && config_debug) {
1794286866Sjasone		size_t ndirty = arena_dirty_count(arena);
1795234370Sjasone		assert(ndirty == arena->ndirty);
1796234370Sjasone	}
1797296221Sjasone	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1798296221Sjasone	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1799234370Sjasone
1800286866Sjasone	qr_new(&purge_runs_sentinel, rd_link);
1801286866Sjasone	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1802234370Sjasone
1803299587Sjasone	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
1804286866Sjasone	    &purge_runs_sentinel, &purge_chunks_sentinel);
1805296221Sjasone	if (npurge == 0)
1806296221Sjasone		goto label_return;
1807299587Sjasone	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
1808299587Sjasone	    &purge_runs_sentinel, &purge_chunks_sentinel);
1809296221Sjasone	assert(npurged == npurge);
1810299587Sjasone	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
1811286866Sjasone	    &purge_chunks_sentinel);
1812242844Sjasone
1813296221Sjasone	if (config_stats)
1814296221Sjasone		arena->stats.npurge++;
1815296221Sjasone
1816296221Sjasonelabel_return:
1817286866Sjasone	arena->purging = false;
1818234370Sjasone}
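
/*
 * Example of the ndirty_limit contract (hypothetical numbers): with
 * ndirty == 1000 and ndirty_limit == 600, ratio mode purges at least 400
 * pages so that at most 600 remain, whereas decay mode purges as much as
 * it can while stopping before ndirty would drop below 600.
 */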
1819234370Sjasone
1820234370Sjasonevoid
1821299587Sjasonearena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
1822234370Sjasone{
1823234370Sjasone
1824299587Sjasone	malloc_mutex_lock(tsdn, &arena->lock);
1825296221Sjasone	if (all)
1826299587Sjasone		arena_purge_to_limit(tsdn, arena, 0);
1827296221Sjasone	else
1828299587Sjasone		arena_maybe_purge(tsdn, arena);
1829299587Sjasone	malloc_mutex_unlock(tsdn, &arena->lock);
1830234370Sjasone}
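
/*
 * This is the backing implementation for forced purging; the
 * "arena.<i>.purge" mallctl (an assumed mapping, not defined in this
 * file) would reach it with all == true, purging every dirty page via
 * arena_purge_to_limit(tsdn, arena, 0).
 */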
1831234370Sjasone
1832234370Sjasonestatic void
1833299587Sjasonearena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1834299587Sjasone{
1835299587Sjasone	size_t pageind, npages;
1836299587Sjasone
1837299587Sjasone	cassert(config_prof);
1838299587Sjasone	assert(opt_prof);
1839299587Sjasone
1840299587Sjasone	/*
1841299587Sjasone	 * Iterate over the allocated runs and remove profiled allocations from
1842299587Sjasone	 * the sample set.
1843299587Sjasone	 */
1844299587Sjasone	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1845299587Sjasone		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1846299587Sjasone			if (arena_mapbits_large_get(chunk, pageind) != 0) {
1847299587Sjasone				void *ptr = (void *)((uintptr_t)chunk + (pageind
1848299587Sjasone				    << LG_PAGE));
1849299587Sjasone				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1850299587Sjasone				    config_prof);
1851299587Sjasone
1852299587Sjasone				prof_free(tsd, ptr, usize);
1853299587Sjasone				npages = arena_mapbits_large_size_get(chunk,
1854299587Sjasone				    pageind) >> LG_PAGE;
1855299587Sjasone			} else {
1856299587Sjasone				/* Skip small run. */
1857299587Sjasone				size_t binind = arena_mapbits_binind_get(chunk,
1858299587Sjasone				    pageind);
1859299587Sjasone				arena_bin_info_t *bin_info =
1860299587Sjasone				    &arena_bin_info[binind];
1861299587Sjasone				npages = bin_info->run_size >> LG_PAGE;
1862299587Sjasone			}
1863299587Sjasone		} else {
1864299587Sjasone			/* Skip unallocated run. */
1865299587Sjasone			npages = arena_mapbits_unallocated_size_get(chunk,
1866299587Sjasone			    pageind) >> LG_PAGE;
1867299587Sjasone		}
1868299587Sjasone		assert(pageind + npages <= chunk_npages);
1869299587Sjasone	}
1870299587Sjasone}
1871299587Sjasone
1872299587Sjasonevoid
1873299587Sjasonearena_reset(tsd_t *tsd, arena_t *arena)
1874299587Sjasone{
1875299587Sjasone	unsigned i;
1876299587Sjasone	extent_node_t *node;
1877299587Sjasone
1878299587Sjasone	/*
1879299587Sjasone	 * Locking in this function is unintuitive.  The caller guarantees that
1880299587Sjasone	 * no concurrent operations are happening in this arena, but there are
1881299587Sjasone	 * still reasons that some locking is necessary:
1882299587Sjasone	 *
1883299587Sjasone	 * - Some of the functions in the transitive closure of calls assume
1884299587Sjasone	 *   appropriate locks are held, and in some cases these locks are
1885299587Sjasone	 *   temporarily dropped to avoid lock order reversal or deadlock due to
1886299587Sjasone	 *   reentry.
1887299587Sjasone	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
1888299587Sjasone	 *   strictly speaking this is a "concurrent operation", disallowing
1889299587Sjasone	 *   stats refreshes would impose an inconvenient burden.
1890299587Sjasone	 */
1891299587Sjasone
1892299587Sjasone	/* Remove large allocations from prof sample set. */
1893299587Sjasone	if (config_prof && opt_prof) {
1894299587Sjasone		ql_foreach(node, &arena->achunks, ql_link) {
1895299587Sjasone			arena_achunk_prof_reset(tsd, arena,
1896299587Sjasone			    extent_node_addr_get(node));
1897299587Sjasone		}
1898299587Sjasone	}
1899299587Sjasone
1900299587Sjasone	/* Reset curruns for large size classes. */
1901299587Sjasone	if (config_stats) {
1902299587Sjasone		for (i = 0; i < nlclasses; i++)
1903299587Sjasone			arena->stats.lstats[i].curruns = 0;
1904299587Sjasone	}
1905299587Sjasone
1906299587Sjasone	/* Huge allocations. */
1907299587Sjasone	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1908299587Sjasone	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1909299587Sjasone	    ql_last(&arena->huge, ql_link)) {
1910299587Sjasone		void *ptr = extent_node_addr_get(node);
1911299587Sjasone		size_t usize;
1912299587Sjasone
1913299587Sjasone		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1914299587Sjasone		if (config_stats || (config_prof && opt_prof))
1915299587Sjasone			usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1916299587Sjasone		/* Remove huge allocation from prof sample set. */
1917299587Sjasone		if (config_prof && opt_prof)
1918299587Sjasone			prof_free(tsd, ptr, usize);
1919299587Sjasone		huge_dalloc(tsd_tsdn(tsd), ptr);
1920299587Sjasone		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1921299587Sjasone		/* Cancel out unwanted effects on stats. */
1922299587Sjasone		if (config_stats)
1923299587Sjasone			arena_huge_reset_stats_cancel(arena, usize);
1924299587Sjasone	}
1925299587Sjasone	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1926299587Sjasone
1927299587Sjasone	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
1928299587Sjasone
1929299587Sjasone	/* Bins. */
1930299587Sjasone	for (i = 0; i < NBINS; i++) {
1931299587Sjasone		arena_bin_t *bin = &arena->bins[i];
1932299587Sjasone		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1933299587Sjasone		bin->runcur = NULL;
1934299587Sjasone		arena_run_heap_new(&bin->runs);
1935299587Sjasone		if (config_stats) {
1936299587Sjasone			bin->stats.curregs = 0;
1937299587Sjasone			bin->stats.curruns = 0;
1938299587Sjasone		}
1939299587Sjasone		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1940299587Sjasone	}
1941299587Sjasone
1942299587Sjasone	/*
1943299587Sjasone	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1944299587Sjasone	 * chains directly correspond.
1945299587Sjasone	 */
1946299587Sjasone	qr_new(&arena->runs_dirty, rd_link);
1947299587Sjasone	for (node = qr_next(&arena->chunks_cache, cc_link);
1948299587Sjasone	    node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1949299587Sjasone		qr_new(&node->rd, rd_link);
1950299587Sjasone		qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1951299587Sjasone	}
1952299587Sjasone
1953299587Sjasone	/* Arena chunks. */
1954299587Sjasone	for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1955299587Sjasone	    ql_last(&arena->achunks, ql_link)) {
1956299587Sjasone		ql_remove(&arena->achunks, node, ql_link);
1957299587Sjasone		arena_chunk_discard(tsd_tsdn(tsd), arena,
1958299587Sjasone		    extent_node_addr_get(node));
1959299587Sjasone	}
1960299587Sjasone
1961299587Sjasone	/* Spare. */
1962299587Sjasone	if (arena->spare != NULL) {
1963299587Sjasone		arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
1964299587Sjasone		arena->spare = NULL;
1965299587Sjasone	}
1966299587Sjasone
1967299587Sjasone	assert(!arena->purging);
1968299587Sjasone	arena->nactive = 0;
1969299587Sjasone
	for (i = 0; i < runs_avail_nclasses; i++)
1971299587Sjasone		arena_run_heap_new(&arena->runs_avail[i]);
1972299587Sjasone
1973299587Sjasone	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
1974299587Sjasone}
1975299587Sjasone
1976299587Sjasonestatic void
1977261071Sjasonearena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
1978286866Sjasone    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1979286866Sjasone    size_t flag_decommitted)
1980234370Sjasone{
1981261071Sjasone	size_t size = *p_size;
1982261071Sjasone	size_t run_ind = *p_run_ind;
1983261071Sjasone	size_t run_pages = *p_run_pages;
1984234370Sjasone
1985234370Sjasone	/* Try to coalesce forward. */
1986234370Sjasone	if (run_ind + run_pages < chunk_npages &&
1987235238Sjasone	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1988286866Sjasone	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1989286866Sjasone	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1990286866Sjasone	    flag_decommitted) {
1991235238Sjasone		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1992235238Sjasone		    run_ind+run_pages);
1993234370Sjasone		size_t nrun_pages = nrun_size >> LG_PAGE;
1994234370Sjasone
1995234370Sjasone		/*
1996234370Sjasone		 * Remove successor from runs_avail; the coalesced run is
1997234370Sjasone		 * inserted later.
1998234370Sjasone		 */
1999235238Sjasone		assert(arena_mapbits_unallocated_size_get(chunk,
2000235238Sjasone		    run_ind+run_pages+nrun_pages-1) == nrun_size);
2001235238Sjasone		assert(arena_mapbits_dirty_get(chunk,
2002235238Sjasone		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
2003286866Sjasone		assert(arena_mapbits_decommitted_get(chunk,
2004286866Sjasone		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
2005286866Sjasone		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
2006234370Sjasone
2007286866Sjasone		/*
2008286866Sjasone		 * If the successor is dirty, remove it from the set of dirty
2009286866Sjasone		 * pages.
2010286866Sjasone		 */
2011286866Sjasone		if (flag_dirty != 0) {
2012286866Sjasone			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
2013286866Sjasone			    nrun_pages);
2014286866Sjasone		}
2015286866Sjasone
2016234370Sjasone		size += nrun_size;
2017234370Sjasone		run_pages += nrun_pages;
2018234370Sjasone
2019235238Sjasone		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2020235238Sjasone		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2021235238Sjasone		    size);
2022234370Sjasone	}
2023234370Sjasone
2024234370Sjasone	/* Try to coalesce backward. */
2025261071Sjasone	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2026261071Sjasone	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
2027286866Sjasone	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2028286866Sjasone	    flag_decommitted) {
2029235238Sjasone		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2030235238Sjasone		    run_ind-1);
2031234370Sjasone		size_t prun_pages = prun_size >> LG_PAGE;
2032234370Sjasone
2033234370Sjasone		run_ind -= prun_pages;
2034234370Sjasone
2035234370Sjasone		/*
2036234370Sjasone		 * Remove predecessor from runs_avail; the coalesced run is
2037234370Sjasone		 * inserted later.
2038234370Sjasone		 */
2039235238Sjasone		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2040235238Sjasone		    prun_size);
2041235238Sjasone		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
2042286866Sjasone		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2043286866Sjasone		    flag_decommitted);
2044286866Sjasone		arena_avail_remove(arena, chunk, run_ind, prun_pages);
2045234370Sjasone
2046286866Sjasone		/*
2047286866Sjasone		 * If the predecessor is dirty, remove it from the set of dirty
2048286866Sjasone		 * pages.
2049286866Sjasone		 */
2050286866Sjasone		if (flag_dirty != 0) {
2051286866Sjasone			arena_run_dirty_remove(arena, chunk, run_ind,
2052286866Sjasone			    prun_pages);
2053286866Sjasone		}
2054286866Sjasone
2055234370Sjasone		size += prun_size;
2056234370Sjasone		run_pages += prun_pages;
2057234370Sjasone
2058235238Sjasone		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2059235238Sjasone		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2060235238Sjasone		    size);
2061234370Sjasone	}
2062234370Sjasone
2063261071Sjasone	*p_size = size;
2064261071Sjasone	*p_run_ind = run_ind;
2065261071Sjasone	*p_run_pages = run_pages;
2066261071Sjasone}
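
/*
 * Coalescing example (hypothetical layout): freeing a 4-page run that is
 * followed by a free 2-page run and preceded by a free 3-page run with
 * matching dirty/decommitted flags yields a single 9-page run.  The
 * neighbors are removed from runs_avail here, and the caller inserts the
 * merged run exactly once.
 */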
2067261071Sjasone
2068286866Sjasonestatic size_t
2069286866Sjasonearena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2070286866Sjasone    size_t run_ind)
2071261071Sjasone{
2072286866Sjasone	size_t size;
2073261071Sjasone
2074261071Sjasone	assert(run_ind >= map_bias);
2075261071Sjasone	assert(run_ind < chunk_npages);
2076286866Sjasone
2077261071Sjasone	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2078261071Sjasone		size = arena_mapbits_large_size_get(chunk, run_ind);
2079286866Sjasone		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2080261071Sjasone		    run_ind+(size>>LG_PAGE)-1) == 0);
2081261071Sjasone	} else {
2082286866Sjasone		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2083261071Sjasone		size = bin_info->run_size;
2084261071Sjasone	}
2085286866Sjasone
2086286866Sjasone	return (size);
2087286866Sjasone}
2088286866Sjasone
2089286866Sjasonestatic void
2090299587Sjasonearena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
2091299587Sjasone    bool cleaned, bool decommitted)
2092286866Sjasone{
2093286866Sjasone	arena_chunk_t *chunk;
2094286866Sjasone	arena_chunk_map_misc_t *miscelm;
2095286866Sjasone	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
2096286866Sjasone
2097286866Sjasone	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2098286866Sjasone	miscelm = arena_run_to_miscelm(run);
2099286866Sjasone	run_ind = arena_miscelm_to_pageind(miscelm);
2100286866Sjasone	assert(run_ind >= map_bias);
2101286866Sjasone	assert(run_ind < chunk_npages);
2102286866Sjasone	size = arena_run_size_get(arena, chunk, run, run_ind);
2103261071Sjasone	run_pages = (size >> LG_PAGE);
2104296221Sjasone	arena_nactive_sub(arena, run_pages);
2105261071Sjasone
	/*
	 * The run is dirty if the caller claims to have dirtied it, or if it
	 * was already dirty before being allocated and the caller doesn't
	 * claim to have cleaned it.
	 */
2111261071Sjasone	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2112261071Sjasone	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2113286866Sjasone	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2114286866Sjasone	    != 0)
2115261071Sjasone		dirty = true;
2116261071Sjasone	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
2117286866Sjasone	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
2118261071Sjasone
2119261071Sjasone	/* Mark pages as unallocated in the chunk map. */
2120286866Sjasone	if (dirty || decommitted) {
2121286866Sjasone		size_t flags = flag_dirty | flag_decommitted;
2122286866Sjasone		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
2123261071Sjasone		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2124286866Sjasone		    flags);
2125261071Sjasone	} else {
2126261071Sjasone		arena_mapbits_unallocated_set(chunk, run_ind, size,
2127261071Sjasone		    arena_mapbits_unzeroed_get(chunk, run_ind));
2128261071Sjasone		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2129261071Sjasone		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2130261071Sjasone	}
2131261071Sjasone
2132261071Sjasone	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2133286866Sjasone	    flag_dirty, flag_decommitted);
2134261071Sjasone
2135234370Sjasone	/* Insert into runs_avail, now that coalescing is complete. */
2136235238Sjasone	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2137235238Sjasone	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2138235238Sjasone	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2139235238Sjasone	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2140286866Sjasone	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2141286866Sjasone	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
2142286866Sjasone	arena_avail_insert(arena, chunk, run_ind, run_pages);
2143234370Sjasone
2144286866Sjasone	if (dirty)
2145286866Sjasone		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
2146286866Sjasone
2147235238Sjasone	/* Deallocate chunk if it is now completely unused. */
2148286866Sjasone	if (size == arena_maxrun) {
2149235238Sjasone		assert(run_ind == map_bias);
2150286866Sjasone		assert(run_pages == (arena_maxrun >> LG_PAGE));
2151299587Sjasone		arena_chunk_dalloc(tsdn, arena, chunk);
2152235238Sjasone	}
2153234370Sjasone
2154234370Sjasone	/*
2155234370Sjasone	 * It is okay to do dirty page processing here even if the chunk was
2156234370Sjasone	 * deallocated above, since in that case it is the spare.  Waiting
2157234370Sjasone	 * until after possible chunk deallocation to do dirty processing
2158234370Sjasone	 * allows for an old spare to be fully deallocated, thus decreasing the
2159234370Sjasone	 * chances of spuriously crossing the dirty page purging threshold.
2160234370Sjasone	 */
2161234370Sjasone	if (dirty)
2162299587Sjasone		arena_maybe_purge(tsdn, arena);
2163234370Sjasone}
2164234370Sjasone
2165234370Sjasonestatic void
2166299587Sjasonearena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2167299587Sjasone    arena_run_t *run, size_t oldsize, size_t newsize)
2168234370Sjasone{
2169286866Sjasone	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2170286866Sjasone	size_t pageind = arena_miscelm_to_pageind(miscelm);
2171234370Sjasone	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
2172235238Sjasone	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2173286866Sjasone	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2174286866Sjasone	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2175286866Sjasone	    CHUNK_MAP_UNZEROED : 0;
2176234370Sjasone
2177234370Sjasone	assert(oldsize > newsize);
2178234370Sjasone
2179234370Sjasone	/*
2180234370Sjasone	 * Update the chunk map so that arena_run_dalloc() can treat the
2181234370Sjasone	 * leading run as separately allocated.  Set the last element of each
2182234370Sjasone	 * run first, in case of single-page runs.
2183234370Sjasone	 */
2184235238Sjasone	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2185286866Sjasone	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2186286866Sjasone	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2187286866Sjasone	    pageind+head_npages-1)));
2188286866Sjasone	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2189286866Sjasone	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2190234370Sjasone
2191234370Sjasone	if (config_debug) {
2192234370Sjasone		UNUSED size_t tail_npages = newsize >> LG_PAGE;
2193235238Sjasone		assert(arena_mapbits_large_size_get(chunk,
2194235238Sjasone		    pageind+head_npages+tail_npages-1) == 0);
2195235238Sjasone		assert(arena_mapbits_dirty_get(chunk,
2196235238Sjasone		    pageind+head_npages+tail_npages-1) == flag_dirty);
2197234370Sjasone	}
2198235322Sjasone	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2199286866Sjasone	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2200286866Sjasone	    pageind+head_npages)));
2201234370Sjasone
2202299587Sjasone	arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
2203299587Sjasone	    0));
2204234370Sjasone}
2205234370Sjasone
2206234370Sjasonestatic void
2207299587Sjasonearena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2208299587Sjasone    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
2209234370Sjasone{
2210286866Sjasone	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2211286866Sjasone	size_t pageind = arena_miscelm_to_pageind(miscelm);
2212234370Sjasone	size_t head_npages = newsize >> LG_PAGE;
2213235238Sjasone	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2214286866Sjasone	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2215286866Sjasone	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2216286866Sjasone	    CHUNK_MAP_UNZEROED : 0;
2217286866Sjasone	arena_chunk_map_misc_t *tail_miscelm;
2218286866Sjasone	arena_run_t *tail_run;
2219234370Sjasone
2220234370Sjasone	assert(oldsize > newsize);
2221234370Sjasone
2222234370Sjasone	/*
2223234370Sjasone	 * Update the chunk map so that arena_run_dalloc() can treat the
2224234370Sjasone	 * trailing run as separately allocated.  Set the last element of each
2225234370Sjasone	 * run first, in case of single-page runs.
2226234370Sjasone	 */
2227235238Sjasone	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2228286866Sjasone	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2229286866Sjasone	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2230286866Sjasone	    pageind+head_npages-1)));
2231286866Sjasone	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2232286866Sjasone	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2233234370Sjasone
2234235238Sjasone	if (config_debug) {
2235235238Sjasone		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2236235238Sjasone		assert(arena_mapbits_large_size_get(chunk,
2237235238Sjasone		    pageind+head_npages+tail_npages-1) == 0);
2238235238Sjasone		assert(arena_mapbits_dirty_get(chunk,
2239235238Sjasone		    pageind+head_npages+tail_npages-1) == flag_dirty);
2240235238Sjasone	}
2241235238Sjasone	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2242286866Sjasone	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2243286866Sjasone	    pageind+head_npages)));
2244234370Sjasone
2245299587Sjasone	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
2246286866Sjasone	tail_run = &tail_miscelm->run;
2247299587Sjasone	arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
2248299587Sjasone	    != 0));
2249234370Sjasone}
2250234370Sjasone
2251234370Sjasonestatic void
2252234370Sjasonearena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2253234370Sjasone{
2254286866Sjasone	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2255234370Sjasone
2256299587Sjasone	arena_run_heap_insert(&bin->runs, miscelm);
2257234370Sjasone}
2258234370Sjasone
2259299587Sjasonestatic arena_run_t *
2260299587Sjasonearena_bin_nonfull_run_tryget(arena_bin_t *bin)
2261234370Sjasone{
2262299587Sjasone	arena_chunk_map_misc_t *miscelm;
2263234370Sjasone
2264299587Sjasone	miscelm = arena_run_heap_remove_first(&bin->runs);
2265299587Sjasone	if (miscelm == NULL)
2266299587Sjasone		return (NULL);
2267299587Sjasone	if (config_stats)
2268299587Sjasone		bin->stats.reruns++;
2269234370Sjasone
2270299587Sjasone	return (&miscelm->run);
2271234370Sjasone}
2272234370Sjasone
2273234370Sjasonestatic arena_run_t *
2274299587Sjasonearena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2275234370Sjasone{
2276234370Sjasone	arena_run_t *run;
2277288090Sjasone	szind_t binind;
2278234370Sjasone	arena_bin_info_t *bin_info;
2279234370Sjasone
2280234370Sjasone	/* Look for a usable run. */
2281234370Sjasone	run = arena_bin_nonfull_run_tryget(bin);
2282234370Sjasone	if (run != NULL)
2283234370Sjasone		return (run);
2284234370Sjasone	/* No existing runs have any space available. */
2285234370Sjasone
2286234370Sjasone	binind = arena_bin_index(arena, bin);
2287234370Sjasone	bin_info = &arena_bin_info[binind];
2288234370Sjasone
2289234370Sjasone	/* Allocate a new run. */
2290299587Sjasone	malloc_mutex_unlock(tsdn, &bin->lock);
2291234370Sjasone	/******************************/
2292299587Sjasone	malloc_mutex_lock(tsdn, &arena->lock);
2293299587Sjasone	run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
2294234370Sjasone	if (run != NULL) {
2295234370Sjasone		/* Initialize run internals. */
2296286866Sjasone		run->binind = binind;
2297234370Sjasone		run->nfree = bin_info->nregs;
2298286866Sjasone		bitmap_init(run->bitmap, &bin_info->bitmap_info);
2299234370Sjasone	}
2300299587Sjasone	malloc_mutex_unlock(tsdn, &arena->lock);
2301234370Sjasone	/********************************/
2302299587Sjasone	malloc_mutex_lock(tsdn, &bin->lock);
2303234370Sjasone	if (run != NULL) {
2304234370Sjasone		if (config_stats) {
2305234370Sjasone			bin->stats.nruns++;
2306234370Sjasone			bin->stats.curruns++;
2307234370Sjasone		}
2308234370Sjasone		return (run);
2309234370Sjasone	}
2310234370Sjasone
2311234370Sjasone	/*
2312261071Sjasone	 * arena_run_alloc_small() failed, but another thread may have made
2313234370Sjasone	 * sufficient memory available while this one dropped bin->lock above,
2314234370Sjasone	 * so search one more time.
2315234370Sjasone	 */
2316234370Sjasone	run = arena_bin_nonfull_run_tryget(bin);
2317234370Sjasone	if (run != NULL)
2318234370Sjasone		return (run);
2319234370Sjasone
2320234370Sjasone	return (NULL);
2321234370Sjasone}
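
/*
 * Note that bin->lock is dropped before arena->lock is acquired above,
 * presumably to avoid holding both locks at once; the cost is the race
 * window that makes the final arena_bin_nonfull_run_tryget() retry
 * necessary.
 */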
2322234370Sjasone
2323234370Sjasone/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2324234370Sjasonestatic void *
2325299587Sjasonearena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2326234370Sjasone{
2327288090Sjasone	szind_t binind;
2328234370Sjasone	arena_bin_info_t *bin_info;
2329234370Sjasone	arena_run_t *run;
2330234370Sjasone
2331234370Sjasone	binind = arena_bin_index(arena, bin);
2332234370Sjasone	bin_info = &arena_bin_info[binind];
2333234370Sjasone	bin->runcur = NULL;
2334299587Sjasone	run = arena_bin_nonfull_run_get(tsdn, arena, bin);
2335234370Sjasone	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2336234370Sjasone		/*
2337234370Sjasone		 * Another thread updated runcur while this one ran without the
2338234370Sjasone		 * bin lock in arena_bin_nonfull_run_get().
2339234370Sjasone		 */
2340288090Sjasone		void *ret;
2341234370Sjasone		assert(bin->runcur->nfree > 0);
2342234370Sjasone		ret = arena_run_reg_alloc(bin->runcur, bin_info);
2343234370Sjasone		if (run != NULL) {
2344234370Sjasone			arena_chunk_t *chunk;
2345234370Sjasone
			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run heap.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
2354234370Sjasone			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2355299587Sjasone			if (run->nfree == bin_info->nregs) {
2356299587Sjasone				arena_dalloc_bin_run(tsdn, arena, chunk, run,
2357299587Sjasone				    bin);
2358299587Sjasone			} else
2359234370Sjasone				arena_bin_lower_run(arena, chunk, run, bin);
2360234370Sjasone		}
2361234370Sjasone		return (ret);
2362234370Sjasone	}
2363234370Sjasone
2364234370Sjasone	if (run == NULL)
2365234370Sjasone		return (NULL);
2366234370Sjasone
2367234370Sjasone	bin->runcur = run;
2368234370Sjasone
2369234370Sjasone	assert(bin->runcur->nfree > 0);
2370234370Sjasone
2371234370Sjasone	return (arena_run_reg_alloc(bin->runcur, bin_info));
2372234370Sjasone}
2373234370Sjasone
2374234370Sjasonevoid
2375299587Sjasonearena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
2376296221Sjasone    szind_t binind, uint64_t prof_accumbytes)
2377234370Sjasone{
2378234370Sjasone	unsigned i, nfill;
2379234370Sjasone	arena_bin_t *bin;
2380234370Sjasone
2381234370Sjasone	assert(tbin->ncached == 0);
2382234370Sjasone
2383299587Sjasone	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2384299587Sjasone		prof_idump(tsdn);
2385234370Sjasone	bin = &arena->bins[binind];
2386299587Sjasone	malloc_mutex_lock(tsdn, &bin->lock);
2387234370Sjasone	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2388234370Sjasone	    tbin->lg_fill_div); i < nfill; i++) {
2389288090Sjasone		arena_run_t *run;
2390288090Sjasone		void *ptr;
2391234370Sjasone		if ((run = bin->runcur) != NULL && run->nfree > 0)
2392234370Sjasone			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2393234370Sjasone		else
2394299587Sjasone			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
2395286866Sjasone		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved to the i slots just below tbin->avail
			 * before bailing out.
			 */
2401286866Sjasone			if (i > 0) {
2402296221Sjasone				memmove(tbin->avail - i, tbin->avail - nfill,
2403286866Sjasone				    i * sizeof(void *));
2404286866Sjasone			}
2405234370Sjasone			break;
2406286866Sjasone		}
2407286866Sjasone		if (config_fill && unlikely(opt_junk_alloc)) {
2408234370Sjasone			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2409234370Sjasone			    true);
2410234370Sjasone		}
2411234370Sjasone		/* Insert such that low regions get used first. */
2412296221Sjasone		*(tbin->avail - nfill + i) = ptr;
2413234370Sjasone	}
2414234370Sjasone	if (config_stats) {
2415234370Sjasone		bin->stats.nmalloc += i;
2416234370Sjasone		bin->stats.nrequests += tbin->tstats.nrequests;
2417286866Sjasone		bin->stats.curregs += i;
2418234370Sjasone		bin->stats.nfills++;
2419234370Sjasone		tbin->tstats.nrequests = 0;
2420234370Sjasone	}
2421299587Sjasone	malloc_mutex_unlock(tsdn, &bin->lock);
2422234370Sjasone	tbin->ncached = i;
2423299587Sjasone	arena_decay_tick(tsdn, arena);
2424234370Sjasone}
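
/*
 * Fill-count example (hypothetical numbers): with ncached_max == 200 and
 * lg_fill_div == 1, a cache miss refills the tcache bin with 100 regions;
 * tcache maintenance may raise lg_fill_div so that bins whose cached
 * regions go unused are refilled less aggressively.
 */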
2425234370Sjasone
2426234370Sjasonevoid
2427234370Sjasonearena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2428234370Sjasone{
2429234370Sjasone
2430299587Sjasone	size_t redzone_size = bin_info->redzone_size;
2431299587Sjasone
2432234370Sjasone	if (zero) {
2433299587Sjasone		memset((void *)((uintptr_t)ptr - redzone_size),
2434299587Sjasone		    JEMALLOC_ALLOC_JUNK, redzone_size);
2435299587Sjasone		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2436299587Sjasone		    JEMALLOC_ALLOC_JUNK, redzone_size);
2437234370Sjasone	} else {
2438299587Sjasone		memset((void *)((uintptr_t)ptr - redzone_size),
2439299587Sjasone		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
2440234370Sjasone	}
2441234370Sjasone}
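
/*
 * Redzone layout sketch (derived from the code above): when zero is
 * requested, junk fills only the redzone_size bytes on each side of the
 * region, leaving the zeroed region intact; otherwise junk fills the
 * whole reg_interval, redzones and region alike:
 *
 *	|<- redzone ->|<- reg_size ->|<- redzone ->|
 *	^ ptr - redzone_size         ^ ptr + reg_size
 */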
2442234370Sjasone
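/*
 * The #undef/#define pairs below implement the JEMALLOC_JET testing hook
 * pattern: the static definition is compiled under an "n_"-prefixed name,
 * and a writable function pointer carrying the public name is exported so
 * that tests can interpose their own implementation.
 */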
2443261071Sjasone#ifdef JEMALLOC_JET
2444261071Sjasone#undef arena_redzone_corruption
2445299587Sjasone#define	arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
2446261071Sjasone#endif
2447261071Sjasonestatic void
2448261071Sjasonearena_redzone_corruption(void *ptr, size_t usize, bool after,
2449261071Sjasone    size_t offset, uint8_t byte)
2450234370Sjasone{
2451261071Sjasone
2452261071Sjasone	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2453261071Sjasone	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2454261071Sjasone	    after ? "after" : "before", ptr, usize, byte);
2455261071Sjasone}
2456261071Sjasone#ifdef JEMALLOC_JET
2457261071Sjasone#undef arena_redzone_corruption
2458261071Sjasone#define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2459261071Sjasonearena_redzone_corruption_t *arena_redzone_corruption =
2460299587Sjasone    JEMALLOC_N(n_arena_redzone_corruption);
2461261071Sjasone#endif
2462261071Sjasone
2463261071Sjasonestatic void
2464261071Sjasonearena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2465261071Sjasone{
2466234370Sjasone	bool error = false;
2467234370Sjasone
2468286866Sjasone	if (opt_junk_alloc) {
2469288090Sjasone		size_t size = bin_info->reg_size;
2470288090Sjasone		size_t redzone_size = bin_info->redzone_size;
2471288090Sjasone		size_t i;
2472288090Sjasone
2473286866Sjasone		for (i = 1; i <= redzone_size; i++) {
2474286866Sjasone			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2475299587Sjasone			if (*byte != JEMALLOC_ALLOC_JUNK) {
2476286866Sjasone				error = true;
2477286866Sjasone				arena_redzone_corruption(ptr, size, false, i,
2478286866Sjasone				    *byte);
2479286866Sjasone				if (reset)
2480299587Sjasone					*byte = JEMALLOC_ALLOC_JUNK;
2481286866Sjasone			}
2482234370Sjasone		}
2483286866Sjasone		for (i = 0; i < redzone_size; i++) {
2484286866Sjasone			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2485299587Sjasone			if (*byte != JEMALLOC_ALLOC_JUNK) {
2486286866Sjasone				error = true;
2487286866Sjasone				arena_redzone_corruption(ptr, size, true, i,
2488286866Sjasone				    *byte);
2489286866Sjasone				if (reset)
2490299587Sjasone					*byte = JEMALLOC_ALLOC_JUNK;
2491286866Sjasone			}
2492234370Sjasone		}
2493234370Sjasone	}
2494286866Sjasone
2495234370Sjasone	if (opt_abort && error)
2496234370Sjasone		abort();
2497261071Sjasone}
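
/*
 * Note on the loop bounds above: the leading redzone is scanned as
 * ptr[-1] .. ptr[-redzone_size] (i in [1, redzone_size]), while the trailing
 * redzone is scanned as ptr[size] .. ptr[size + redzone_size - 1]
 * (i in [0, redzone_size)); both loops cover exactly redzone_size bytes.
 */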

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t redzone_size = bin_info->redzone_size;

	arena_redzones_validate(ptr, bin_info, false);
	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
	    bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(n_arena_dalloc_junk_small);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
	szind_t binind;
	arena_bin_info_t *bin_info;

	cassert(config_fill);
	assert(opt_junk_free);
	assert(opt_quarantine);
	assert(usize <= SMALL_MAXCLASS);

	binind = size2index(usize);
	bin_info = &arena_bin_info[binind];
	arena_redzones_validate(ptr, bin_info, true);
}

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	arena_run_t *run;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(tsdn, arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}

void *
arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	size_t usize;
	uintptr_t random_offset;
	arena_run_t *run;
	arena_chunk_map_misc_t *miscelm;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	/* Large allocation. */
	usize = index2size(binind);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_cache_oblivious) {
		uint64_t r;

		/*
		 * Compute a uniformly distributed offset within the first page
		 * that is a multiple of the cacheline size, e.g. [0 .. 64) *
		 * 64 for 4 KiB pages and 64-byte cachelines.
		 */
		r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
		random_offset = ((uintptr_t)r) << LG_CACHELINE;
	} else
		random_offset = 0;
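	/*
	 * Worked example, assuming LG_PAGE == 12 and LG_CACHELINE == 6:
	 * prng_lg_range() yields r in [0, 2^6) == [0, 64), so random_offset
	 * is one of the 64 cacheline-aligned values {0, 64, 128, ..., 4032},
	 * all within the first page of the run.
	 */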
	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
		szind_t index = binind - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	if (config_prof)
		idump = arena_prof_accum_locked(arena, usize);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (config_prof && idump)
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc))
				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
			else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero)
{

	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsdn, arena, ind, zero));
	if (likely(size <= large_maxclass))
		return (arena_malloc_large(tsdn, arena, ind, zero));
	return (huge_malloc(tsdn, arena, index2size(ind), zero));
}

/* Only handles large allocations that require more than page alignment. */
static void *
arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(!tsdn_null(tsdn) || arena != NULL);
	assert(usize == PAGE_CEILING(usize));

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	alignment = PAGE_CEILING(alignment);
	alloc_size = usize + large_pad + alignment - PAGE;

	malloc_mutex_lock(tsdn, &arena->lock);
	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);

	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
	    (uintptr_t)rpages;
	assert(alloc_size >= leadsize + usize);
	trailsize = alloc_size - leadsize - usize - large_pad;
	if (leadsize != 0) {
		arena_chunk_map_misc_t *head_miscelm = miscelm;
		arena_run_t *head_run = run;

		miscelm = arena_miscelm_get_mutable(chunk,
		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
		    LG_PAGE));
		run = &miscelm->run;

		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
		    alloc_size - leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
		    trailsize, usize + large_pad, false);
	}
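	/*
	 * Worked example of the head/tail trimming above, assuming 4 KiB pages
	 * and large_pad == 0: for usize == 16 KiB and alignment == 8 KiB,
	 * alloc_size == 20 KiB.  If rpages happens to start 4 KiB past an
	 * 8 KiB boundary, leadsize == 4 KiB (trimmed from the head) and
	 * trailsize == 20 KiB - 4 KiB - 16 KiB == 0, leaving a 16 KiB run
	 * whose base is 8 KiB-aligned.
	 */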
	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
		size_t run_ind =
		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
		bool decommitted = (arena_mapbits_decommitted_get(chunk,
		    run_ind) != 0);

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	if (config_fill && !zero) {
		if (unlikely(opt_junk_alloc))
			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
		else if (unlikely(opt_zero))
			memset(ret, 0, usize);
	}
	arena_decay_tick(tsdn, arena);
	return (ret);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
		 */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
		if (config_cache_oblivious)
			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
	} else {
		if (likely(usize <= large_maxclass)) {
			ret = arena_palloc_large(tsdn, arena, usize, alignment,
			    zero);
		} else if (likely(alignment <= chunksize))
			ret = huge_malloc(tsdn, arena, usize, zero);
		else {
			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return (ret);
}

void
arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind;
	szind_t binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = size2index(size);
	assert(binind < NBINS);
	arena_mapbits_large_binind_set(chunk, pageind, binind);

	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
	assert(isalloc(tsdn, ptr, true) == size);
}

static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/* Dissociate run from bin. */
	if (run == bin->runcur)
		bin->runcur = NULL;
	else {
		szind_t binind = arena_bin_index(extent_node_arena_get(
		    &chunk->node), bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if
		 * the run only contains one region, then it never gets
		 * inserted into the non-full runs heap.
		 */
		if (bin_info->nregs != 1) {
			arena_chunk_map_misc_t *miscelm =
			    arena_run_to_miscelm(run);

			arena_run_heap_remove(&bin->runs, miscelm);
		}
	}
}

static void
arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin)
{

	assert(run != bin->runcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_run_dalloc(tsdn, arena, run, true, false, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats)
		bin->stats.curruns--;
}
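
/*
 * The lock juggling in arena_dalloc_bin_run() is deliberate: the bin lock is
 * dropped before arena->lock is taken for arena_run_dalloc(), so the two are
 * never held at the same time here; presumably this keeps the lock
 * acquisition order (and the witness ranks the mutexes are created with)
 * consistent, and it bounds how long the bin stays locked during run
 * teardown.
 */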

static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	 */
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{

	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}

void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get_mutable(chunk, pageind);
	arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
	arena_decay_tick(tsdn, arena);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(n_arena_dalloc_junk_large);
#endif

static void
arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(tsdn, arena, run, true, false, false);
}

void
arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, void *ptr)
{

	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}

void
arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	arena_decay_tick(tsdn, arena);
}

static void
arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
	    large_pad, true);
	if (config_stats) {
		szind_t oldindex = size2index(oldsize) - NBINS;
		szind_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = (oldsize + large_pad) >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
	    large_pad);

	/* Try to extend the run. */
	malloc_mutex_lock(tsdn, &arena->lock);
	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
	    pageind+npages) != 0)
		goto label_fail;
	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
	if (oldsize + followsize >= usize_min) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;

		usize = usize_max;
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		assert(usize >= oldsize);
		splitsize = usize - oldsize;
		if (splitsize == 0)
			goto label_fail;

		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
		if (arena_run_split_large(arena, run, splitsize, zero))
			goto label_fail;

		if (config_cache_oblivious && zero) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the run is a multiple of
			 * CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)((uintptr_t)ptr + oldsize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
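
		/*
		 * Worked example of the zeroing above, assuming 4 KiB pages:
		 * if zbase sits 3968 bytes into its page, zpast is the next
		 * page boundary and nzero == 128; if zbase is page-aligned
		 * (random_offset == 0), zpast == zbase + PAGE and nzero ==
		 * PAGE.  Either way nzero is in (0, PAGE].
		 */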

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(tsdn, &arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (true);
}
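
/*
 * Worked example of the size-class step-down in arena_ralloc_large_grow(),
 * assuming the default 4 KiB class spacing in this size range: with oldsize ==
 * 16 KiB, followsize == 8 KiB, and usize_max == 32 KiB, usize steps
 * 32 KiB -> 28 KiB -> 24 KiB and stops, since oldsize + followsize == 24 KiB;
 * splitsize is then 8 KiB (this presumes usize_min <= 24 KiB).
 */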

#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(n_arena_ralloc_junk_large);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object and the following run is already in use.
 */
static bool
arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
		    oldsize, usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize),
				    JEMALLOC_ALLOC_JUNK,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(tsdn, ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
	return (false);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}
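
		/*
		 * Example of the small-class move test above (assuming 96 and
		 * 128 are both small size classes, as in the default
		 * configuration): growing a 96-byte region to usize_max == 128
		 * changes the size class while growing, so the reallocation
		 * must move; shrinking a 128-byte region to a request whose
		 * usize_max is still 128 leaves the class unchanged and
		 * succeeds in place.
		 */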

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
		    zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
		    alignment, zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache, true);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsdn, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (false);
}

ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->retained += arena->stats.retained;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;
	size_t arena_size;
	unsigned i;

	/* Compute arena size to incorporate sufficient runs_avail elements. */
	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
	    runs_avail_nclasses);
	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
	if (config_stats) {
		arena = (arena_t *)base_alloc(tsdn,
		    CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
		    sizeof(malloc_large_stats_t) + nhclasses) *
		    sizeof(malloc_huge_stats_t));
	} else
		arena = (arena_t *)base_alloc(tsdn, arena_size);
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads[0] = arena->nthreads[1] = 0;
	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(arena_size) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get(tsdn);

	ql_new(&arena->achunks);

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < runs_avail_nclasses; i++)
		arena_run_heap_new(&arena->runs_avail[i]);
	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
	    WITNESS_RANK_ARENA_HUGE))
		return (NULL);

	extent_tree_szad_new(&arena->chunks_szad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szad_new(&arena->chunks_szad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
	    WITNESS_RANK_ARENA_CHUNKS))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
	    WITNESS_RANK_ARENA_NODE_CACHE))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN))
			return (NULL);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/* Make sure that the run will fit within an arena chunk. */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}
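
/*
 * Worked example of the perfect-run loop above, assuming 4 KiB pages, no
 * redzones, and reg_size == 96: the do/while tries 4096 (42 regions, 4032
 * bytes used), then 8192 (85 regions, 8160 bytes used), and settles on
 * perfect_run_size == 12288 with perfect_nregs == 128, the smallest page
 * multiple that 96 divides evenly.
 */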
3702234370Sjasone
3703234370Sjasonestatic void
3704234370Sjasonebin_info_init(void)
3705234370Sjasone{
3706234370Sjasone	arena_bin_info_t *bin_info;
3707234370Sjasone
3708286866Sjasone#define	BIN_INFO_INIT_bin_yes(index, size)				\
3709286866Sjasone	bin_info = &arena_bin_info[index];				\
3710234370Sjasone	bin_info->reg_size = size;					\
3711286866Sjasone	bin_info_run_size_calc(bin_info);				\
3712234370Sjasone	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
3713286866Sjasone#define	BIN_INFO_INIT_bin_no(index, size)
3714286866Sjasone#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
3715286866Sjasone	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3716234370Sjasone	SIZE_CLASSES
3717286866Sjasone#undef BIN_INFO_INIT_bin_yes
3718286866Sjasone#undef BIN_INFO_INIT_bin_no
3719286866Sjasone#undef SC
3720234370Sjasone}
3721234370Sjasone
3722286866Sjasonestatic bool
3723286866Sjasonesmall_run_size_init(void)
3724286866Sjasone{
3725286866Sjasone
3726286866Sjasone	assert(small_maxrun != 0);
3727286866Sjasone
3728299587Sjasone	small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
3729286866Sjasone	    LG_PAGE));
	if (small_run_tab == NULL)
		return (true);

#define	TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define	TAB_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}
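
/*
 * Sketch of the intended lookup (cf. run_quantize_floor_compute() and
 * run_quantize_ceil_compute() earlier in this file): a page-aligned size is
 * a valid small run size iff
 *
 *	size <= small_maxrun && small_run_tab[size >> LG_PAGE]
 *
 * which makes validation a constant-time table read rather than a search
 * over the size classes.
 */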

static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}
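
/*
 * With both tables populated, the hot-path quantization lookups reduce to a
 * single memoized read; e.g. run_quantize_floor(size), for page-aligned
 * size in (0, run_quantize_max], can simply return
 * run_quantize_floor_tab[(size >> LG_PAGE) - 1].
 */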

bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
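
	/*
	 * Illustrative convergence of the loop above (hypothetical numbers:
	 * 4 KiB pages, chunk_npages == 512, and 112 bytes of map_bits plus
	 * map_misc entries per page, ignoring the offsetof() term):
	 *
	 *   pass 1: map_bias == 0  -> header == 112 * 512 bytes -> 14 pages
	 *   pass 2: map_bias == 14 -> header == 112 * 498 bytes -> 14 pages
	 *   pass 3: map_bias == 14 (fixed point, as promised by step 3)
	 */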

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;
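
	/*
	 * At this point the NSIZES size classes are partitioned three ways:
	 * NBINS small classes, nlclasses large classes in
	 * (SMALL_MAXCLASS, large_maxclass], and nhclasses huge classes above
	 * that; the two assignments above make the counts sum to NSIZES by
	 * construction.
	 */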

	bin_info_init();
	if (small_run_size_init())
		return (true);
	if (run_quantize_init())
		return (true);

	runs_avail_bias = size2index(PAGE);
	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
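
	/*
	 * Sketch of how these two values are meant to be used: an arena's
	 * runs_avail heaps are indexed by size class relative to the bias,
	 * i.e. the heap for size index ind lives at
	 * runs_avail[ind - runs_avail_bias], with one heap per quantized run
	 * size from PAGE up to run_quantize_max.
	 */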

	return (false);
}

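/*
 * Fork handling: the prefork hooks below acquire every arena mutex in a
 * fixed order (arena lock, chunks, node cache, then bin locks and the huge
 * lock) before fork(2), so that no other thread can hold one across the
 * fork; the postfork hooks release them in the reverse order in both the
 * parent and the child.
 */
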
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}