/* arena.c, revision 235322 */
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

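/*
 * Map request sizes to size class bin indices.  Each S2B_<delta>() macro
 * emits delta/8 copies of its bin index, so expanding SIZE_CLASSES fills
 * the table with one entry per 8-byte quantum up to the largest small size
 * class, and SMALL_SIZE2BIN() can resolve a size with a single lookup.
 */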
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t	*arena_run_alloc_helper(arena_t *arena, size_t size,
    bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);

/******************************************************************************/

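/*
 * Order run tree nodes by map element address, which tracks run address
 * within a chunk.  Address ordering makes tree searches prefer the lowest
 * run, which tends to pack small allocations toward low addresses.
 */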
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

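/*
 * Order available runs primarily by size and secondarily by address, so
 * that nsearch() finds the lowest-addressed best fit.  Key nodes (those
 * with CHUNK_MAP_KEY set) compare as lower than all real map elements of
 * equal size.
 */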
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

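/*
 * Allocate a region from run.  bitmap_sfu() returns the index of the
 * lowest free region and marks it allocated, so regions are reused in
 * address order within a run.
 */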
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

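/*
 * Split a run out of the unallocated run that contains it: remove the
 * containing run from the appropriate runs_avail_* tree, mark the first
 * need_pages pages as allocated, and reinsert any trailing pages as a
 * smaller unallocated run.
 */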
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	size_t flag_dirty;
	arena_avail_tree_t *runs_avail;

	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;
	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    CHUNK_MAP_DIRTY);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
		    run_ind+need_pages));
	}

	/* Update dirty page accounting. */
	if (flag_dirty != 0) {
		chunk->ndirty -= need_pages;
		arena->ndirty -= need_pages;
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if (arena_mapbits_unzeroed_get(chunk,
					    run_ind+i) != 0) {
						VALGRIND_MAKE_MEM_UNDEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						memset((void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), 0, PAGE);
					} else if (config_debug) {
						VALGRIND_MAKE_MEM_DEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						arena_chunk_validate_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				VALGRIND_MAKE_MEM_UNDEFINED((void
				    *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), (need_pages << LG_PAGE));
				memset((void *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), 0, (need_pages << LG_PAGE));
			}
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		    flag_dirty);
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_chunk_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
				arena_chunk_validate_zeroed(chunk, run_ind+i);
		}
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
		    0) {
			arena_chunk_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
	}
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		arena_avail_tree_t *runs_avail;

		chunk = arena->spare;
		arena->spare = NULL;

		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));

		/* Insert the run into the appropriate runs_avail_* tree. */
		if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
			runs_avail = &arena->runs_avail_clean;
		else
			runs_avail = &arena->runs_avail_dirty;
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
		    map_bias));
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;
		ql_elm_new(chunk, link_dirty);
		chunk->dirtied = false;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		    unzeroed);
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);

		/* Insert the run into the runs_avail_clean tree. */
		arena_avail_tree_insert(&arena->runs_avail_clean,
		    arena_mapp_get(chunk, map_bias));
	}

	return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	arena_avail_tree_t *runs_avail;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the appropriate runs_avail_* tree, so that the arena
	 * does not use it.
	 */
	if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
		runs_avail = &arena->runs_avail_clean;
	else
		runs_avail = &arena->runs_avail_dirty;
	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		if (spare->dirtied) {
			ql_remove(&chunk->arena->chunks_dirty, spare,
			    link_dirty);
			arena->ndirty -= spare->ndirty;
		}
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}

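/*
 * Search runs_avail_dirty before runs_avail_clean, so that dirty pages are
 * reused first and clean pages stay untouched as long as possible; this
 * reduces the amount of purging work that later becomes necessary.
 */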
static arena_run_t *
arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_helper(arena, size, large, binind, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_helper(arena, size, large, binind, zero));
}

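/*
 * Purge only when the number of dirty pages, less those already committed
 * to purging via npurgatory, exceeds both chunk_npages and
 * (nactive >> opt_lg_dirty_mult), i.e. when the active:dirty ratio that
 * opt_lg_dirty_mult encodes has been exceeded.
 */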
static inline void
arena_maybe_purge(arena_t *arena)
{

	/* Enforce opt_lg_dirty_mult. */
	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory))
		arena_purge(arena, false);
}

static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, flag_unzeroed;
	size_t ndirty;
	size_t nmadvise;

	ql_new(&mapelms);

	flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
   /*
    * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
    * mappings, but not for file-backed mappings.
    */
	    0
#else
	    CHUNK_MAP_UNZEROED
#endif
	    ;

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	/* Temporarily allocate all free dirty runs within chunk. */
	for (pageind = map_bias; pageind < chunk_npages;) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t npages;

			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
			if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
				size_t i;

				arena_avail_tree_remove(
				    &arena->runs_avail_dirty, mapelm);

				arena_mapbits_unzeroed_set(chunk, pageind,
				    flag_unzeroed);
				arena_mapbits_large_set(chunk, pageind,
				    (npages << LG_PAGE), 0);
				/*
				 * Update internal elements in the page map, so
				 * that CHUNK_MAP_UNZEROED is properly set.
				 */
				for (i = 1; i < npages - 1; i++) {
					arena_mapbits_unzeroed_set(chunk,
					    pageind+i, flag_unzeroed);
				}
				if (npages > 1) {
					arena_mapbits_unzeroed_set(chunk,
					    pageind+npages-1, flag_unzeroed);
					arena_mapbits_large_set(chunk,
					    pageind+npages-1, 0, 0);
				}

				if (config_stats) {
					/*
					 * Update stats_cactive if nactive is
					 * crossing a chunk multiple.
					 */
					size_t cactive_diff =
					    CHUNK_CEILING((arena->nactive +
					    npages) << LG_PAGE) -
					    CHUNK_CEILING(arena->nactive <<
					    LG_PAGE);
					if (cactive_diff != 0)
						stats_cactive_add(cactive_diff);
				}
				arena->nactive += npages;
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}

			pageind += npages;
		} else {
			/* Skip allocated run. */
			if (arena_mapbits_large_get(chunk, pageind))
				pageind += arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				pageind += bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);

	if (config_debug)
		ndirty = chunk->ndirty;
	if (config_stats)
		arena->stats.purged += chunk->ndirty;
	arena->ndirty -= chunk->ndirty;
	chunk->ndirty = 0;
	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
	chunk->dirtied = false;

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;

		assert(pageind + npages <= chunk_npages);
		assert(ndirty >= npages);
		if (config_debug)
			ndirty -= npages;

		pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
		    (npages << LG_PAGE));
		if (config_stats)
			nmadvise++;
	}
	assert(ndirty == 0);
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
		    (uintptr_t)(pageind << LG_PAGE));

		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false);
	}
}

static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
		    assert(chunk->dirtied);
		    ndirty += chunk->ndirty;
		}
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	npurgatory = arena->ndirty - arena->npurgatory;
	if (all == false) {
		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		/* Get next chunk with dirty pages. */
		chunk = ql_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		while (chunk->ndirty == 0) {
			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = false;
			chunk = ql_first(&arena->chunks_dirty);
			if (chunk == NULL) {
				/* Same logic as above. */
				arena->npurgatory -= npurgatory;
				return;
			}
		}

		if (chunk->ndirty > npurgatory) {
			/*
			 * This thread will, at a minimum, purge all the dirty
			 * pages in chunk, so set npurgatory to reflect this
			 * thread's commitment to purge the pages.  This tends
			 * to reduce the chances of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += chunk->ndirty - npurgatory;
			npurgatory = chunk->ndirty;
		}

		arena->npurgatory -= chunk->ndirty;
		npurgatory -= chunk->ndirty;
		arena_chunk_purge(arena, chunk);
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;
	arena_avail_tree_t *runs_avail;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	}
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	runs_avail = dirty ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);

		chunk->ndirty += run_pages;
		arena->ndirty += run_pages;
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_tree_remove(runs_avail,
		    arena_mapp_get(chunk, run_ind+run_pages));

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
		    run_ind));

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));

	if (dirty) {
		/*
		 * Insert into chunks_dirty before potentially calling
		 * arena_chunk_dealloc(), so that chunks_dirty and
		 * arena->ndirty are consistent.
		 */
		if (chunk->dirtied == false) {
			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = true;
		}
	}

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dealloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	arena_run_dalloc(arena, run, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty);
}

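/*
 * Return the lowest-addressed non-full run in bin's run tree, or NULL if
 * the tree is empty.  Address order falls out of arena_run_comp(), which
 * compares map element addresses.
 */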
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;
		arena_run_t *run;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t))) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    arena_mapbits_small_runind_get(chunk, pageind)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

	arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
		    bin_info->redzone_size);
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	void *ret;
	size_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc() may have allocated run, or it may
			 * have pulled run from the bin's run tree.  Therefore
			 * it is unsafe to make any assumptions about how run
			 * has previously been used, and arena_bin_lower_run()
			 * must be called, as if a region were just deallocated
			 * from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (config_prof && prof_interval != 0) {
		arena->prof_accumbytes += accumbytes;
		if (arena->prof_accumbytes >= prof_interval) {
			prof_idump();
			arena->prof_accumbytes -= prof_interval;
		}
	}
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;
	arena_run_t *run;
	void *ptr;

	assert(tbin->ncached == 0);

	if (config_prof) {
		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, prof_accumbytes);
		malloc_mutex_unlock(&arena->lock);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL)
			break;
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		tbin->avail[nfill - 1 - i] = ptr;
	}
	if (config_stats) {
		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
}

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}

void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t size = bin_info->reg_size;
	size_t redzone_size = bin_info->redzone_size;
	size_t i;
	bool error = false;

	for (i = 1; i <= redzone_size; i++) {
		unsigned byte;
		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
			error = true;
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
			    (i == 1) ? "" : "s", ptr, size, byte);
		}
	}
	for (i = 0; i < redzone_size; i++) {
		unsigned byte;
		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
			error = true;
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
			    i, (i == 1) ? "" : "s", ptr, size, byte);
		}
	}
	if (opt_abort && error)
		abort();

	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
	    bin_info->reg_interval);
}

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	arena_run_t *run;
	size_t binind;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	bin = &arena->bins[binind];
	size = arena_bin_info[binind].reg_size;

	malloc_mutex_lock(&bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(&bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.allocated += size;
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
	}
	malloc_mutex_unlock(&bin->lock);
	if (config_prof && isthreaded == false) {
		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, size);
		malloc_mutex_unlock(&arena->lock);
	}

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}

	return (ret);
}

void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
	void *ret;

	/* Large allocation. */
	size = PAGE_CEILING(size);
	malloc_mutex_lock(&arena->lock);
	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
	if (ret == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	if (config_stats) {
		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	if (config_prof)
		arena_prof_accum(arena, size);
	malloc_mutex_unlock(&arena->lock);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
		}
	}

	return (ret);
}

/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;

	assert((size & PAGE_MASK) == 0);

	alignment = PAGE_CEILING(alignment);
	alloc_size = size + alignment - PAGE;
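	/*
	 * Over-allocate by (alignment - PAGE) bytes so that the run is
	 * guaranteed to contain a suitably aligned address; the unaligned
	 * head and any trailing excess are trimmed off below.
	 */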

	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
	    (uintptr_t)run;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)run + leadsize);
	if (leadsize != 0) {
		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
		    leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
		    false);
	}

	if (config_stats) {
		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, size);
		else if (opt_zero)
			memset(ret, 0, size);
	}
	return (ret);
}

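/*
 * Called when profiling sampling causes a small request to be satisfied by
 * a PAGE-size large run: record the true bin index in the chunk map so
 * that subsequent isalloc(ptr, true) calls report the requested small
 * size.
 */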
1451arena_prof_promoted(const void *ptr, size_t size)
1452{
1453	arena_chunk_t *chunk;
1454	size_t pageind, binind;
1455
1456	cassert(config_prof);
1457	assert(ptr != NULL);
1458	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1459	assert(isalloc(ptr, false) == PAGE);
1460	assert(isalloc(ptr, true) == PAGE);
1461	assert(size <= SMALL_MAXCLASS);
1462
1463	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1464	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1465	binind = SMALL_SIZE2BIN(size);
1466	assert(binind < NBINS);
1467	arena_mapbits_large_binind_set(chunk, pageind, binind);
1468
1469	assert(isalloc(ptr, false) == PAGE);
1470	assert(isalloc(ptr, true) == size);
1471}
1472
1473static void
1474arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1475    arena_bin_t *bin)
1476{
1477
1478	/* Dissociate run from bin. */
1479	if (run == bin->runcur)
1480		bin->runcur = NULL;
1481	else {
1482		size_t binind = arena_bin_index(chunk->arena, bin);
1483		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1484
1485		if (bin_info->nregs != 1) {
1486			/*
1487			 * This block's conditional is necessary because if the
1488			 * run only contains one region, then it never gets
1489			 * inserted into the non-full runs tree.
1490			 */
1491			arena_bin_runs_remove(bin, run);
1492		}
1493	}
1494}
1495
1496static void
1497arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1498    arena_bin_t *bin)
1499{
1500	size_t binind;
1501	arena_bin_info_t *bin_info;
1502	size_t npages, run_ind, past;
1503
1504	assert(run != bin->runcur);
1505	assert(arena_run_tree_search(&bin->runs,
1506	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1507	    == NULL);
1508
1509	binind = arena_bin_index(chunk->arena, run->bin);
1510	bin_info = &arena_bin_info[binind];
1511
1512	malloc_mutex_unlock(&bin->lock);
1513	/******************************/
1514	npages = bin_info->run_size >> LG_PAGE;
1515	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1516	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1517	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1518	    bin_info->reg_interval - bin_info->redzone_size) -
1519	    (uintptr_t)chunk) >> LG_PAGE);
1520	malloc_mutex_lock(&arena->lock);
1521
1522	/*
1523	 * If the run was originally clean, and some pages were never touched,
1524	 * trim the clean pages before deallocating the dirty portion of the
1525	 * run.
1526	 */
1527	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1528	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
1529	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1530	    npages) {
1531		/* Trim clean pages.  Convert to large run beforehand. */
1532		assert(npages > 0);
1533		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1534		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
1535		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1536		    ((past - run_ind) << LG_PAGE), false);
1537		/* npages = past - run_ind; */
1538	}
1539	arena_run_dalloc(arena, run, true);
1540	malloc_mutex_unlock(&arena->lock);
1541	/****************************/
1542	malloc_mutex_lock(&bin->lock);
1543	if (config_stats)
1544		bin->stats.curruns--;
1545}
1546
1547static void
1548arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1549    arena_bin_t *bin)
1550{
1551
1552	/*
1553	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1554	 * non-full run.  It is okay to NULL runcur out rather than proactively
1555	 * keeping it pointing at the lowest non-full run.
1556	 */
1557	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1558		/* Switch runcur. */
1559		if (bin->runcur->nfree > 0)
1560			arena_bin_runs_insert(bin, bin->runcur);
1561		bin->runcur = run;
1562		if (config_stats)
1563			bin->stats.reruns++;
1564	} else
1565		arena_bin_runs_insert(bin, run);
1566}
1567
1568void
1569arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1570    arena_chunk_map_t *mapelm)
1571{
1572	size_t pageind;
1573	arena_run_t *run;
1574	arena_bin_t *bin;
1575	arena_bin_info_t *bin_info;
1576	size_t size, binind;
1577
1578	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1579	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1580	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1581	bin = run->bin;
1582	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
1583	bin_info = &arena_bin_info[binind];
1584	if (config_fill || config_stats)
1585		size = bin_info->reg_size;
1586
1587	if (config_fill && opt_junk)
1588		arena_dalloc_junk_small(ptr, bin_info);
1589
1590	arena_run_reg_dalloc(run, ptr);
1591	if (run->nfree == bin_info->nregs) {
1592		arena_dissociate_bin_run(chunk, run, bin);
1593		arena_dalloc_bin_run(arena, chunk, run, bin);
1594	} else if (run->nfree == 1 && run != bin->runcur)
1595		arena_bin_lower_run(arena, chunk, run, bin);
1596
1597	if (config_stats) {
1598		bin->stats.allocated -= size;
1599		bin->stats.ndalloc++;
1600	}
1601}
1602
1603void
1604arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1605    size_t pageind, arena_chunk_map_t *mapelm)
1606{
1607	arena_run_t *run;
1608	arena_bin_t *bin;
1609
1610	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1611	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1612	bin = run->bin;
1613	malloc_mutex_lock(&bin->lock);
1614	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1615	malloc_mutex_unlock(&bin->lock);
1616}
1617
1618void
1619arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1620    size_t pageind)
1621{
1622	arena_chunk_map_t *mapelm;
1623
1624	if (config_debug) {
1625		/* arena_ptr_small_binind_get() does extra sanity checking. */
1626		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1627		    pageind)) != BININD_INVALID);
1628	}
1629	mapelm = arena_mapp_get(chunk, pageind);
1630	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1631}
1632void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].allocated += bin->stats.allocated;
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}

void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	if (config_fill || config_stats) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		/* Junk filling must not depend on config_stats. */
		if (config_fill && opt_junk)
			memset(ptr, 0x5a, size);
		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
		}
	}

	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
}
1699
1700void
1701arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1702{
1703
1704	malloc_mutex_lock(&arena->lock);
1705	arena_dalloc_large_locked(arena, chunk, ptr);
1706	malloc_mutex_unlock(&arena->lock);
1707}
1708
1709static void
1710arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1711    size_t oldsize, size_t size)
1712{
1713
1714	assert(size < oldsize);
1715
1716	/*
1717	 * Shrink the run, and make trailing pages available for other
1718	 * allocations.
1719	 */
1720	malloc_mutex_lock(&arena->lock);
1721	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1722	    true);
1723	if (config_stats) {
1724		arena->stats.ndalloc_large++;
1725		arena->stats.allocated_large -= oldsize;
1726		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1727		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1728
1729		arena->stats.nmalloc_large++;
1730		arena->stats.nrequests_large++;
1731		arena->stats.allocated_large += size;
1732		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1733		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1734		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1735	}
1736	malloc_mutex_unlock(&arena->lock);
1737}
1738
1739static bool
1740arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1741    size_t oldsize, size_t size, size_t extra, bool zero)
1742{
1743	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1744	size_t npages = oldsize >> LG_PAGE;
1745	size_t followsize;
1746
1747	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
1748
1749	/* Try to extend the run. */
1750	assert(size + extra > oldsize);
1751	malloc_mutex_lock(&arena->lock);
1752	if (pageind + npages < chunk_npages &&
1753	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1754	    (followsize = arena_mapbits_unallocated_size_get(chunk,
1755	    pageind+npages)) >= size - oldsize) {
1756		/*
1757		 * The next run is available and sufficiently large.  Split the
1758		 * following run, then merge the first part with the existing
1759		 * allocation.
1760		 */
1761		size_t flag_dirty;
1762		size_t splitsize = (oldsize + followsize <= size + extra)
1763		    ? followsize : size + extra - oldsize;
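		/*
		 * Illustrative (hypothetical) numbers: with oldsize == 2
		 * pages, size == 3 pages, extra == 1 page, and followsize ==
		 * 4 pages, oldsize + followsize (6) exceeds size + extra (4),
		 * so only splitsize == size + extra - oldsize == 2 pages are
		 * taken from the following run.
		 */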
		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << LG_PAGE)), splitsize, true,
		    BININD_INVALID, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if the object must grow and the following run is already in
 * use.
 */
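/*
 * Worked example (hypothetical sizes; assumes 4 KiB pages): for size == 5000
 * and extra == 0, PAGE_CEILING(size + extra) == 8192.  If oldsize is also
 * 8192, the allocation already spans the same number of pages and no move is
 * needed; if psize < oldsize the run is trimmed in place, and if psize >
 * oldsize an in-place grow into the following run is attempted.
 */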
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t psize;

	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class. */
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
			    size);
		}
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;

		if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			if (config_fill && opt_junk) {
				memset((void *)((uintptr_t)ptr + size), 0x5a,
				    oldsize - size);
			}
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    psize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
			if (config_fill && ret == false && zero == false &&
			    opt_zero) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    size - oldsize);
			}
			return (ret);
		}
	}
}

void *
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
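	/*
	 * Illustrative case (assuming 16- and 32-byte small size classes):
	 * for oldsize == 32, a request with size + extra in (16, 32] maps to
	 * the same bin, and a request with size <= 32 <= size + extra can
	 * also be satisfied in place, so ptr is returned unchanged in both
	 * cases.
	 */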
	if (oldsize <= arena_maxclass) {
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
			    == oldsize);
			if ((size + extra <= SMALL_MAXCLASS &&
			    SMALL_SIZE2BIN(size + extra) ==
			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
			    size + extra >= oldsize)) {
				if (config_fill && opt_junk && size < oldsize) {
					memset((void *)((uintptr_t)ptr + size),
					    0x5a, oldsize - size);
				}
				return (ptr);
			}
		} else {
			assert(size <= arena_maxclass);
			if (size + extra > SMALL_MAXCLASS) {
				if (arena_ralloc_large(ptr, oldsize, size,
				    extra, zero) == false)
					return (ptr);
			}
		}
	}

	/* Reallocation would require a move. */
	return (NULL);
}

void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	if (alignment != 0) {
		size_t usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
	} else
		ret = arena_malloc(NULL, size + extra, zero, try_tcache);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
		} else
			ret = arena_malloc(NULL, size, zero, try_tcache);

		if (ret == NULL)
			return (NULL);
	}

	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
	memcpy(ret, ptr, copysize);
	iqalloc(ptr);
	return (ret);
}

bool
arena_new(arena_t *arena, unsigned ind)
{
	unsigned i;
	arena_bin_t *bin;

	arena->ind = ind;
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))
		return (true);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats =
		    (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (arena->stats.lstats == NULL)
			return (true);
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	/* Initialize chunks. */
	ql_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;
	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail_clean);
	arena_avail_tree_new(&arena->runs_avail_dirty);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (false);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
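/*
 * Worked example (hypothetical numbers; assumes 4 KiB pages, no redzones,
 * and a 32-byte header, bitmap included): for reg_size == 32, a one-page run
 * holds (4096 - 32) / 32 == 127 regions, and the remaining
 * 4096 - 127 * 32 == 32 bytes hold the header exactly.  The resulting
 * overhead (32/4096, under 1%) satisfies RUN_MAX_OVRHD, so run_size stays at
 * one page.
 */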
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
	size_t pad_size;
	size_t try_run_size, good_run_size;
	uint32_t try_nregs, good_nregs;
	uint32_t try_hdr_size, good_hdr_size;
	uint32_t try_bitmap_offset, good_bitmap_offset;
	uint32_t try_ctx0_offset, good_ctx0_offset;
	uint32_t try_redzone0_offset, good_redzone0_offset;

	assert(min_run_size >= PAGE);
	assert(min_run_size <= arena_maxclass);

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
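	/*
	 * For example (assuming REDZONE_MINSIZE == 16): reg_size == 96 has a
	 * minimum alignment of 32 (its lowest set bit), so redzone_size
	 * becomes 16 and pad_size 16, giving reg_interval == 96 + 2*16 ==
	 * 128, a multiple of 32 that preserves region alignment.
	 */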
	if (config_fill && opt_redzone) {
		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Calculate known-valid settings before entering the run_size
	 * expansion loop, so that the first part of the loop always copies
	 * valid settings.
	 *
	 * The do..while loop iteratively reduces the number of regions until
	 * the run header and the regions no longer overlap.  A closed formula
	 * would be quite messy, since there is an interdependency between the
	 * header's mask length and the number of regions.
	 */
	try_run_size = min_run_size;
	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
	    bin_info->reg_interval)
	    + 1; /* Counter-act try_nregs-- in loop. */
	if (try_nregs > RUN_MAXREGS) {
		try_nregs = RUN_MAXREGS
		    + 1; /* Counter-act try_nregs-- in loop. */
	}
	do {
		try_nregs--;
		try_hdr_size = sizeof(arena_run_t);
		/* Pad to a long boundary. */
		try_hdr_size = LONG_CEILING(try_hdr_size);
		try_bitmap_offset = try_hdr_size;
		/* Add space for bitmap. */
		try_hdr_size += bitmap_size(try_nregs);
		if (config_prof && opt_prof && prof_promote == false) {
			/* Pad to a quantum boundary. */
			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
			try_ctx0_offset = try_hdr_size;
			/* Add space for one (prof_ctx_t *) per region. */
			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
		} else
			try_ctx0_offset = 0;
		try_redzone0_offset = try_run_size - (try_nregs *
		    bin_info->reg_interval) - pad_size;
	} while (try_hdr_size > try_redzone0_offset);

	/* run_size expansion loop. */
	do {
		/*
		 * Copy valid settings before trying more aggressive settings.
		 */
		good_run_size = try_run_size;
		good_nregs = try_nregs;
		good_hdr_size = try_hdr_size;
		good_bitmap_offset = try_bitmap_offset;
		good_ctx0_offset = try_ctx0_offset;
		good_redzone0_offset = try_redzone0_offset;

		/* Try more aggressive settings. */
		try_run_size += PAGE;
		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
		    bin_info->reg_interval)
		    + 1; /* Counter-act try_nregs-- in loop. */
		if (try_nregs > RUN_MAXREGS) {
			try_nregs = RUN_MAXREGS
			    + 1; /* Counter-act try_nregs-- in loop. */
		}
		do {
			try_nregs--;
			try_hdr_size = sizeof(arena_run_t);
			/* Pad to a long boundary. */
			try_hdr_size = LONG_CEILING(try_hdr_size);
			try_bitmap_offset = try_hdr_size;
			/* Add space for bitmap. */
			try_hdr_size += bitmap_size(try_nregs);
			if (config_prof && opt_prof && prof_promote == false) {
				/* Pad to a quantum boundary. */
				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
				try_ctx0_offset = try_hdr_size;
				/*
				 * Add space for one (prof_ctx_t *) per region.
				 */
				try_hdr_size += try_nregs *
				    sizeof(prof_ctx_t *);
			}
			try_redzone0_offset = try_run_size - (try_nregs *
			    bin_info->reg_interval) - pad_size;
		} while (try_hdr_size > try_redzone0_offset);
	} while (try_run_size <= arena_maxclass
	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
	    RUN_MAX_OVRHD_RELAX
	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
	    && try_nregs < RUN_MAXREGS);

	assert(good_hdr_size <= good_redzone0_offset);

	/* Copy final settings. */
	bin_info->run_size = good_run_size;
	bin_info->nregs = good_nregs;
	bin_info->bitmap_offset = good_bitmap_offset;
	bin_info->ctx0_offset = good_ctx0_offset;
	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);

	return (good_run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	size_t prev_run_size = PAGE;

#define	SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	SIZE_CLASSES
#undef SIZE_CLASS
}

void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
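	/*
	 * Illustrative pass sequence (hypothetical: chunk_npages == 1024,
	 * 32-byte map entries, 64-byte fixed header, 4 KiB pages): pass 1
	 * sizes the map for all 1024 pages (64 + 32768 bytes -> 9 pages),
	 * pass 2 for 1024 - 9 == 1015 pages (64 + 32480 bytes -> 8 pages),
	 * and pass 3 for 1024 - 8 == 1016 pages (64 + 32512 bytes -> still 8
	 * pages), so map_bias converges to 8.
	 */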
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
		    != 0);
	}
	assert(map_bias > 0);

	arena_maxclass = chunksize - (map_bias << LG_PAGE);

	bin_info_init();
}

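/*
 * Fork handlers: acquire every mutex belonging to the arena before fork(2)
 * and release them all afterward, so that the child does not inherit a mutex
 * held by a thread that no longer exists in the child.
 */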
void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->lock);
}