huge.c revision 234370
#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

uint64_t	huge_nmalloc;
uint64_t	huge_ndalloc;
size_t		huge_allocated;

malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

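/*
 * Allocate a huge object of at least size bytes.  This is a thin wrapper
 * around huge_palloc() that requests chunksize alignment.
 */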
void *
huge_malloc(size_t size, bool zero)
{

	return (huge_palloc(size, chunksize, zero));
}

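/*
 * Allocate one or more contiguous chunks to back a huge object of at least
 * size bytes with the requested alignment, and record the resulting extent
 * in the huge tree.  Returns NULL on size_t overflow or allocation failure.
 */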
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, alignment, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}

	return (ret);
}

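/*
 * Try to satisfy a resize request without moving the allocation.  This
 * succeeds only when the existing allocation is huge and its chunk-rounded
 * size already lies in [CHUNK_CEILING(size), CHUNK_CEILING(size+extra)].
 * For example (assuming 4 MiB chunks), an 8 MiB huge allocation can be
 * resized in place to any request larger than 4 MiB and no larger than
 * 8 MiB, but growing it to 9 MiB (or shrinking it to 4 MiB with extra == 0)
 * returns NULL and forces the caller to move the allocation.
 */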
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}

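/*
 * Reallocate the huge object at ptr.  First attempt an in-place resize via
 * huge_ralloc_no_move(); failing that, allocate a new region (retrying
 * without extra on failure) and copy up to size bytes, using mremap(2) to
 * avoid the copy when JEMALLOC_MREMAP_FIXED is available and neither region
 * lies in the DSS.
 */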
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
	}
	return (ret);
}

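/*
 * Deallocate the huge allocation at ptr: unlink its extent node from the
 * huge tree, update statistics, and hand the chunk(s) back via
 * chunk_dealloc().  When unmap is false (the mremap(2) path in
 * huge_ralloc()), the underlying pages are not returned to the system.
 */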
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}

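/* Return the usable size recorded for the huge allocation at ptr. */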
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

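/* Return the profiling context recorded for the huge allocation at ptr. */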
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

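/* Record ctx as the profiling context for the huge allocation at ptr. */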
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

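/*
 * Initialize huge allocation bookkeeping: the mutex, the extent tree, and
 * (when stats are enabled) the counters.  Returns true on error.
 */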
bool
huge_boot(void)
{

	/* Initialize chunks data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}

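/*
 * Fork handlers: acquire huge_mtx before fork(2) and release it afterward in
 * both the parent and the child, so that huge allocation state remains
 * consistent across fork in a multithreaded process.
 */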
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}