#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#include "un-namespace.h"
#include "libc_private.h"

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) __je_##n
#  include "../jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock = true;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#include "jemalloc/internal/ph.h"
#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define	MALLOCX_ARENA_MASK	((int)~0xfffff)
#define	MALLOCX_ARENA_MAX	0xffe
#define	MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
#define	MALLOCX_TCACHE_MAX	0xffd
#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define	MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define	MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))

#define	MALLOCX_TCACHE_GET(flags)					\
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define	MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
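/*
 * Illustration only (not used by the allocator): assuming the public flag
 * constructors from jemalloc.h (MALLOCX_LG_ALIGN(), MALLOCX_ZERO,
 * MALLOCX_ARENA()), a flags word round-trips through the decoders above as:
 *
 *   int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO | MALLOCX_ARENA(2);
 *   MALLOCX_ALIGN_GET(flags) == 64
 *   MALLOCX_ZERO_GET(flags)  == true
 *   MALLOCX_ARENA_GET(flags) == 2
 */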

/* Smallest size class to support. */
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__sparc64__) || defined(__sparcv9))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __riscv__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
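/*
 * Worked example (illustrative, assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 */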

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE		((size_t)(1U << LG_PAGE))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define	PAGE_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)
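/*
 * Worked example (illustrative, assuming LG_PAGE == 12, i.e. PAGE == 4096):
 *   PAGE_ADDR2BASE((void *)0x12345) == (void *)0x12000
 *   PAGE_CEILING(4096)              == 4096
 *   PAGE_CEILING(4097)              == 8192
 */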

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
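/*
 * Worked example (illustrative; alignment must be a power of two):
 *   ALIGNMENT_ADDR2BASE((void *)0x1007, 16)   == (void *)0x1000
 *   ALIGNMENT_ADDR2OFFSET((void *)0x1007, 16) == 7
 *   ALIGNMENT_CEILING(100, 64)                == 128
 */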

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
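/*
 * Usage sketch (illustrative): VARIABLE_ARRAY(size_t, sizes, n) declares a
 * stack array of n size_t elements, as a C99 VLA when available and via
 * alloca() otherwise, so it should not be used for large or unbounded counts.
 */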

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define	JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define	JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern const char	*opt_junk;
extern bool	opt_junk_alloc;
extern bool	opt_junk_free;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern unsigned	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned	ncpus;

/* Number of arenas used for automatic multiplexing of threads and arenas. */
extern unsigned	narenas_auto;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t	**arenas;

/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const	index2size_tab[NSIZES+1];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const	size2index_tab[];

void	*a0malloc(size_t size);
void	a0dalloc(void *ptr);
void	*bootstrap_malloc(size_t size);
void	*bootstrap_calloc(size_t num, size_t size);
void	bootstrap_free(void *ptr);
unsigned	narenas_total_get(void);
arena_t	*arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t	*arena_choose_hard(tsd_t *tsd, bool internal);
void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void	thread_allocated_cleanup(tsd_t *tsd);
void	thread_deallocated_cleanup(tsd_t *tsd);
void	iarena_cleanup(tsd_t *tsd);
void	arena_cleanup(tsd_t *tsd);
void	arenas_tdata_cleanup(tsd_t *tsd);
void	narenas_tdata_cleanup(tsd_t *tsd);
void	arenas_tdata_bypass_cleanup(tsd_t *tsd);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
szind_t	size2index_compute(size_t size);
szind_t	size2index_lookup(size_t size);
szind_t	size2index(size_t size);
size_t	index2size_compute(szind_t index);
size_t	index2size_lookup(szind_t index);
size_t	index2size(szind_t index);
size_t	s2u_compute(size_t size);
size_t	s2u_lookup(size_t size);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
arena_t	*arena_ichoose(tsdn_t *tsdn, arena_t *arena);
arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
arena_t	*arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
#endif
	{
		szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		szind_t grp = shift << LG_SIZE_CLASS_GROUP;

		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		szind_t index = NTBINS + grp + mod;
		return (index);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{

	assert(size <= LOOKUP_MAXCLASS);
	{
		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
		assert(ret == size2index_compute(size));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (size2index_lookup(size));
	return (size2index_compute(size));
}

JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
	if (index < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
	{
		size_t reduced_index = index - NTBINS;
		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
	size_t ret = (size_t)index2size_tab[index];
	assert(ret == index2size_compute(index));
	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{

	assert(index < NSIZES);
	return (index2size_lookup(index));
}
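/*
 * Note (illustrative): for any in-range request size, the lookup pair composes
 * as index2size(size2index(size)) == s2u(size); the index and the usable size
 * are two views of the same size class.
 */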

JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	}
#endif
	{
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
	size_t ret = index2size_lookup(size2index_lookup(size));

	assert(ret == s2u_compute(size));
	return (ret);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (s2u_lookup(size));
	return (s2u_compute(size));
}
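/*
 * Example values (illustrative, assuming LG_QUANTUM == 4 and
 * LG_SIZE_CLASS_GROUP == 2, as on typical x86-64 builds):
 *   s2u(17)  == 32
 *   s2u(100) == 112
 *   s2u(129) == 160
 */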

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/* Try for a small size class. */
	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
		/*
		 * Round size up to the nearest multiple of alignment.
		 *
		 * This done, we can take advantage of the fact that for each
		 * small size class, every object is aligned at the smallest
		 * power of two that is non-zero in the base two representation
		 * of the size.  For example:
		 *
		 *   Size |   Base 2 | Minimum alignment
		 *   -----+----------+------------------
		 *     96 |  1100000 |  32
		 *    144 | 10100000 |  32
		 *    192 | 11000000 |  64
		 */
		usize = s2u(ALIGNMENT_CEILING(size, alignment));
		if (usize < LARGE_MINCLASS)
			return (usize);
	}

	/* Try for a large size class. */
	if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * to the minimum that can actually be supported.
		 */
		alignment = PAGE_CEILING(alignment);

		/* Make sure result is a large size class. */
		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}

	/* Huge size class.  Beware of overflow. */

	if (unlikely(alignment > HUGE_MAXCLASS))
		return (0);

	/*
	 * We can't achieve subchunk alignment, so round up alignment to the
	 * minimum that can actually be supported.
	 */
	alignment = CHUNK_CEILING(alignment);

	/* Make sure result is a huge size class. */
	if (size <= chunksize)
		usize = chunksize;
	else {
		usize = s2u(size);
		if (usize < size) {
			/* size_t overflow. */
			return (0);
		}
	}

	/*
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
	return (usize);
}
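/*
 * Example (illustrative, same typical configuration as above): sa2u(100, 64)
 * takes the small-class path, rounds 100 up to the 64-aligned value 128, and
 * returns s2u(128) == 128; every object in the 128-byte small class is at
 * least 128-byte aligned, so the requested 64-byte alignment is satisfied.
 */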

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL))
		ret = arena_choose_hard(tsd, internal);

	return (ret);
}

JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{

	return (arena_choose_impl(tsd, arena, false));
}

JEMALLOC_INLINE arena_t *
arena_ichoose(tsdn_t *tsdn, arena_t *arena)
{

	assert(!tsdn_null(tsdn) || arena != NULL);

	if (!tsdn_null(tsdn))
		return (arena_choose_impl(tsdn_tsd(tsdn), NULL, true));
	return (arena);
}

JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return (arena_tdata_get_hard(tsd, ind));
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, cache is old (too small), or tdata to be
		 * initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing)
		return (tdata);
	return (arena_tdata_get_hard(tsd, ind));
}

JEMALLOC_INLINE arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
	arena_t *ret;

	assert(ind <= MALLOCX_ARENA_MAX);

	ret = arenas[ind];
	if (unlikely(ret == NULL)) {
		ret = atomic_read_p((void *)&arenas[ind]);
		if (init_if_missing && unlikely(ret == NULL))
			ret = arena_init(tsdn, ind);
	}
	return (ret);
}

JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL))
		return (NULL);
	return (&tdata->decay_ticker);
}
#endif

#include "jemalloc/internal/bitmap.h"
/*
 * Include portions of arena.h interleaved with tcache.h in order to resolve
 * circular dependencies.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*iaalloc(const void *ptr);
size_t	isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
void	*iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void	*ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
    bool slow_path);
void	*ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void	*ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t	ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(tsdn_t *tsdn, const void *ptr);
void	idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void	idalloc(tsd_t *tsd, void *ptr);
void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void	isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path);
void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void	*iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool	ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{

	assert(ptr != NULL);

	return (arena_aalloc(ptr));
}

/*
 * Typical usage:
 *   tsdn_t *tsdn = [...]
 *   void *ptr = [...]
 *   size_t sz = isalloc(tsdn, ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	return (arena_salloc(tsdn, ptr, demote));
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
	void *ret;

	assert(size != 0);
	assert(!is_metadata || tcache == NULL);
	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret),
		    isalloc(tsdn, ret, config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{

	return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
	    false, NULL, slow_path));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));
	assert(!is_metadata || tcache == NULL);
	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
	    tcache_get(tsd, true), false, NULL));
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
	extent_node_t *node;

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	node = chunk_lookup(ptr, false);
	if (node == NULL)
		return (0);
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_node_addr_get(node) == ptr ||
	    extent_node_achunk_get(node));

	return (isalloc(tsdn, ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		szind_t binind = size2index(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(tsdn_t *tsdn, const void *ptr)
{
	size_t usize = isalloc(tsdn, ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path)
{

	assert(ptr != NULL);
	assert(!is_metadata || tcache == NULL);
	assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
		    config_prof));
	}

	arena_dalloc(tsdn, ptr, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    bool slow_path)
{

	arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{

	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			return (NULL);
		p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
		    arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache, true);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena));
	}

	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
	    tcache));
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero)
{

	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd, true), NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */