#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

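/* Thin wrapper around sbrk(2); must not be called when DSS support is compiled out. */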
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

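/* Return the current dss precedence default (protected by dss_mtx). */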
dss_prec_t
chunk_dss_prec_get(tsdn_t *tsdn)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(tsdn, &dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(tsdn, &dss_mtx);
	return (ret);
}

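/*
 * Set the dss precedence default.  Returns true (failure) if DSS support is
 * not compiled in and a non-disabled precedence is requested.
 */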
bool
chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(tsdn, &dss_mtx);
	return (false);
}

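/*
 * Allocate a chunk-aligned region by extending the DSS via sbrk().  Any
 * chunk-aligned pad space created to satisfy the requested alignment is
 * handed back to the arena for later recycling.  Returns NULL if the DSS is
 * exhausted, the request would wrap the address space, or new_addr does not
 * match the current DSS end.
 */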
void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(tsdn, &dss_mtx);
	if (dss_prev != (void *)-1) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			void *ret, *cpad, *dss_next;
			size_t gap_size, cpad_size;
			intptr_t incr;

			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
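			/*
			 * For illustration, assuming a 4 MiB chunksize: if
			 * dss_max ends 0x1000 bytes past a chunk boundary,
			 * gap_size is 0x3ff000; if dss_max is already
			 * chunk-aligned, gap_size is 0.
			 */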
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(tsdn, &dss_mtx);
				return (NULL);
			}
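			/*
			 * Extend the DSS in a single sbrk() call that covers
			 * all three regions: the alignment gap
			 * [dss_max, cpad), the recyclable pad [cpad, ret), and
			 * the chunk itself [ret, dss_next).
			 */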
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(tsdn, &dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(tsdn, arena,
					    &chunk_hooks, cpad, cpad_size,
					    false, true);
				}
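				/* Honor the caller's zero/commit requests. */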
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}
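			/*
			 * Failure: another thread moved the break, or sbrk()
			 * itself failed.  Retry unless the DSS is now
			 * exhausted.
			 */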
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(tsdn, &dss_mtx);

	return (NULL);
}

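/* Return whether chunk lies within the address range backed by the DSS. */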
bool
chunk_in_dss(tsdn_t *tsdn, void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(tsdn, &dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(tsdn, &dss_mtx);

	return (ret);
}

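/*
 * Initialize DSS bookkeeping from the current program break.  Returns true on
 * error (mutex initialization failure).
 */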
bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx, "dss", WITNESS_RANK_DSS))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

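/*
 * Fork handling: hold dss_mtx across fork(2) so that the child does not
 * inherit it in an inconsistent state, then release it in the parent and
 * child once the fork completes.
 */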
void
chunk_dss_prefork(tsdn_t *tsdn)
{

	if (have_dss)
		malloc_mutex_prefork(tsdn, &dss_mtx);
}

void
chunk_dss_postfork_parent(tsdn_t *tsdn)
{

	if (have_dss)
		malloc_mutex_postfork_parent(tsdn, &dss_mtx);
}

void
chunk_dss_postfork_child(tsdn_t *tsdn)
{

	if (have_dss)
		malloc_mutex_postfork_child(tsdn, &dss_mtx);
}

/******************************************************************************/