#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H

#include "jemalloc/internal/rtree.h"

/* Choose an arena based on a per-thread value. */
static inline arena_t *
arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
	arena_t *ret;

	if (arena != NULL) {
		return arena;
	}

	/* During reentrancy, arena 0 is the safest bet. */
	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
		return arena_get(tsd_tsdn(tsd), 0, true);
	}

	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
	if (unlikely(ret == NULL)) {
		ret = arena_choose_hard(tsd, internal);
		assert(ret != NULL);
		if (tcache_available(tsd)) {
			tcache_t *tcache = tcache_get(tsd);
			if (tcache->arena != NULL) {
				/* See comments in tcache_data_init(). */
				assert(tcache->arena ==
				    arena_get(tsd_tsdn(tsd), 0, false));
				if (tcache->arena != ret) {
					tcache_arena_reassociate(tsd_tsdn(tsd),
					    tcache, ret);
				}
			} else {
				tcache_arena_associate(tsd_tsdn(tsd), tcache,
				    ret);
			}
		}
	}

	/*
	 * Note that for percpu arena, if the current arena is outside of the
	 * auto percpu arena range (i.e. the thread is assigned to a manually
	 * managed arena), then the percpu arena logic is skipped.
	 */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
	    !internal && (arena_ind_get(ret) <
	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
	    tsd_tsdn(tsd))) {
		unsigned ind = percpu_arena_choose();
		if (arena_ind_get(ret) != ind) {
			percpu_arena_update(tsd, ind);
			ret = tsd_arena_get(tsd);
		}
		ret->last_thd = tsd_tsdn(tsd);
	}

	return ret;
}

static inline arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, false);
}

static inline arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
	return arena_choose_impl(tsd, arena, true);
}
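
/*
 * Usage sketch (illustrative only, not part of the jemalloc API contract):
 * allocation paths that carry no explicit arena request pass NULL so that
 * the calling thread's automatic (or percpu) arena is resolved; passing a
 * non-NULL arena skips the per-thread lookup entirely.  For example:
 *
 *	arena_t *arena = arena_choose(tsd, NULL);   // thread's arena
 *	arena_t *iarena = arena_ichoose(tsd, NULL); // arena used for
 *	                                            // internal metadata
 */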

static inline bool
arena_is_auto(arena_t *arena) {
	assert(narenas_auto > 0);

	return (arena_ind_get(arena) < manual_arena_base);
}

JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
}
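
/*
 * Usage sketch (illustrative): iealloc() maps a live allocation back to its
 * owning extent via the global extents_rtree.  The final "true" argument is
 * the dependent-read flag, so the lookup assumes ptr refers to an active
 * allocation.  A typical deallocation or usable-size path would do, e.g.:
 *
 *	extent_t *extent = iealloc(tsdn, ptr);
 *	// ... then read size class / arena / slab state via the extent_*
 *	// accessors.
 */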

#endif /* JEMALLOC_INTERNAL_INLINES_B_H */