#define	JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static unsigned		dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS. */
static void		*dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned		dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

static void *
extent_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}
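
/*
 * Illustrative sketch (not part of this file) of the sbrk() contract the
 * wrapper above relies on: sbrk(0) queries the current program break without
 * moving it, and sbrk(incr) returns the *previous* break on success or
 * (void *)-1 on failure:
 *
 *	void *cur = extent_dss_sbrk(0);     // Current break; no change.
 *	void *prev = extent_dss_sbrk(4096); // Grow by 4096 bytes.
 *	// On success, prev == cur and the new break is cur + 4096.
 */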

dss_prec_t
extent_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
	return (ret);
}

bool
extent_dss_prec_set(dss_prec_t dss_prec)
{
	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
	return (false);
}
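
/*
 * Hypothetical caller sketch (dss_prec_set_by_name is illustrative, not part
 * of jemalloc): translating a precedence name from dss_prec_names back to a
 * dss_prec_t before updating the default, assuming the enum values match the
 * array order:
 *
 *	bool
 *	dss_prec_set_by_name(const char *name)
 *	{
 *		unsigned i;
 *
 *		for (i = 0; i < (unsigned)dss_prec_limit; i++) {
 *			if (strcmp(dss_prec_names[i], name) == 0)
 *				return (extent_dss_prec_set((dss_prec_t)i));
 *		}
 *		return (true); // Unknown name.
 *	}
 */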

static void *
extent_dss_max_update(void *new_addr)
{
	void *max_cur;
	spin_t spinner;

	/*
	 * Get the current end of the DSS as max_cur and ensure that dss_max is
	 * up to date.
	 */
	spin_init(&spinner);
	while (true) {
		void *max_prev = atomic_read_p(&dss_max);

		max_cur = extent_dss_sbrk(0);
		if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
			/*
			 * Another thread optimistically updated dss_max.  Wait
			 * for it to finish.
			 */
			spin_adaptive(&spinner);
			continue;
		}
		if (!atomic_cas_p(&dss_max, max_prev, max_cur))
			break;
	}
	/* A fixed new_addr can only be supported if it is at the DSS edge. */
	if (new_addr != NULL && max_cur != new_addr)
		return (NULL);

	return (max_cur);
}
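
/*
 * Example interleaving (a sketch, not exhaustive) showing why the spin above
 * is needed: thread A optimistically advances dss_max past the break it read
 * and then calls sbrk().  If thread B reads dss_max in that window, B sees
 * max_prev > sbrk(0) and must wait for A to either succeed (so sbrk(0)
 * catches up) or roll dss_max back, rather than publishing a stale, smaller
 * dss_max via the CAS.
 */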

void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
	extent_t *gap;

	cassert(have_dss);
	assert(size > 0);
	assert(alignment > 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a large allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	gap = extent_alloc(tsdn, arena);
	if (gap == NULL)
		return (NULL);

	if (!atomic_read_u(&dss_exhausted)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *ret, *max_cur, *gap_addr, *dss_next, *dss_prev;
			size_t gap_size;
			intptr_t incr;

			max_cur = extent_dss_max_update(new_addr);
			if (max_cur == NULL)
				goto label_oom;

			/*
			 * Compute how much gap space (if any) is necessary to
			 * satisfy alignment.  This space can be recycled for
			 * later use.
			 */
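			/*
			 * Worked example (assuming 4 KiB pages; addresses are
			 * illustrative): if max_cur == 0x1000aff0 and
			 * alignment == 0x10000, then gap_addr ==
			 * PAGE_CEILING(0x1000aff0) == 0x1000b000, ret ==
			 * ALIGNMENT_CEILING(0x1000b000, 0x10000) ==
			 * 0x10010000, and gap_size == 0x10010000 - 0x1000b000
			 * == 0x5000, i.e. five pages that get recorded in the
			 * gap extent rather than leaked.
			 */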
			gap_addr = (void *)(PAGE_CEILING((uintptr_t)max_cur));
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)gap_addr,
			    PAGE_CEILING(alignment));
			gap_size = (uintptr_t)ret - (uintptr_t)gap_addr;
			if (gap_size != 0) {
				extent_init(gap, arena, gap_addr, gap_size,
				    gap_size, arena_extent_sn_next(arena),
				    false, false, true, false);
			}
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)max_cur ||
			    (uintptr_t)dss_next < (uintptr_t)max_cur)
				goto label_oom; /* Wrap-around. */
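			/*
			 * Concrete wrap-around case (illustrative, 32-bit
			 * pointers): with ret == 0xfffff000 and size ==
			 * 0x2000, dss_next wraps to 0x1000, below max_cur;
			 * without this check the allocation would appear to
			 * succeed while overlapping the bottom of the address
			 * space.
			 */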
			incr = gap_size + size;

			/*
			 * Optimistically update dss_max, and roll back below if
			 * sbrk() fails.  No other thread will try to extend the
			 * DSS while dss_max is greater than the current DSS
			 * max reported by sbrk(0).
			 */
			if (atomic_cas_p(&dss_max, max_cur, dss_next))
				continue;

			/* Try to allocate. */
			dss_prev = extent_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				if (gap_size != 0)
					extent_dalloc_gap(tsdn, arena, gap);
				else
					extent_dalloc(tsdn, arena, gap);
				if (!*commit)
					*commit = pages_decommit(ret, size);
				if (*zero && *commit) {
					extent_hooks_t *extent_hooks =
					    EXTENT_HOOKS_INITIALIZER;
					extent_t extent;

					extent_init(&extent, arena, ret, size,
					    size, 0, true, false, true, false);
					if (extent_purge_forced_wrapper(tsdn,
					    arena, &extent_hooks, &extent, 0,
					    size))
						memset(ret, 0, size);
				}
				return (ret);
			}
			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.  Try to roll
			 * back the optimistic dss_max update; if the rollback
			 * fails, it is because another caller of this function
			 * has succeeded since this invocation started, in
			 * which case rollback is not necessary.
			 */
			atomic_cas_p(&dss_max, dss_next, max_cur);
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_write_u(&dss_exhausted, (unsigned)true);
				goto label_oom;
			}
		}
	}
label_oom:
	extent_dalloc(tsdn, arena, gap);
	return (NULL);
}
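
/*
 * Hypothetical call sketch (values illustrative): requesting two committed,
 * zeroed, page-aligned pages directly from the DSS:
 *
 *	bool zero = true, commit = true;
 *	void *addr = extent_alloc_dss(tsdn, arena, NULL, 2 * PAGE, PAGE,
 *	    &zero, &commit);
 *	// addr == NULL on OOM/exhaustion; otherwise *zero and *commit
 *	// report the actual state of the returned memory.
 */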

static bool
extent_in_dss_helper(void *addr, void *max)
{
	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
	    (uintptr_t)max);
}

bool
extent_in_dss(void *addr)
{
	cassert(have_dss);

	return (extent_in_dss_helper(addr, atomic_read_p(&dss_max)));
}

bool
extent_dss_mergeable(void *addr_a, void *addr_b)
{
	void *max;

	cassert(have_dss);

	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
	    (uintptr_t)dss_base)
		return (true);

	max = atomic_read_p(&dss_max);
	return (extent_in_dss_helper(addr_a, max) ==
	    extent_in_dss_helper(addr_b, max));
}
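
/*
 * Rationale (a sketch): the DSS grows upward from dss_base, so addresses
 * below dss_base can never become part of the DSS, and two such extents are
 * trivially mergeable.  Otherwise, extents are mergeable only when their
 * current in-DSS status matches, so DSS and non-DSS memory never coalesce.
 */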

void
extent_dss_boot(void)
{
	cassert(have_dss);

	dss_base = extent_dss_sbrk(0);
	dss_exhausted = (unsigned)(dss_base == (void *)-1);
	dss_max = dss_base;
}

/******************************************************************************/
