#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;

/*
 * Trees of chunks that were previously allocated (trees differ only in node
 * ordering).  These are used when allocating chunks, in an attempt to re-use
 * address space.  Depending on function, different tree orderings are needed,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;

rtree_t *chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void *chunk_recycle(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
    bool *zero);
static void chunk_record(extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, void *chunk, size_t size);

/******************************************************************************/

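/*
 * Attempt to satisfy a chunk allocation request by splitting a suitably
 * sized and aligned region out of a previously recorded extent in the given
 * trees.  Returns NULL if recycling is not possible, including when the
 * request comes from the base allocator, which must not recurse into
 * base_node_{,de}alloc().
 */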
static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,de}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and if that weren't an
		 * issue, potential for infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

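	/*
	 * Over-request by (alignment - chunksize) bytes: recorded extents are
	 * chunk-aligned, so an extent of this size is guaranteed to contain an
	 * alignment-aligned sub-range of "size" bytes.
	 */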
	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dealloc(ret, size, true);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dealloc(node);
	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
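	/*
	 * If the caller requires zeroed memory, zero it now unless the
	 * recycled extent is already known to be zeroed; in debug builds,
	 * verify that claim.
	 */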
	if (*zero) {
		if (zeroed == false)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		}
	}
	return (ret);
}

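/*
 * The order of allocation strategies is controlled by dss_prec: with
 * dss_prec_primary, recycled and freshly extended DSS space is tried before
 * mmap; with dss_prec_secondary, DSS is tried only after mmap fails; with
 * dss_prec_disabled, only mmap is used.
 */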
/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * takes advantage of this to avoid demanding zeroed chunks, while still
 * taking advantage of them when they are returned.
 */
void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* "primary" dss. */
	if (config_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		goto label_return;
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		goto label_return;
	/* "secondary" dss. */
	if (config_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			goto label_return;
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			goto label_return;
	}

	/* All strategies for allocation failed. */
	ret = NULL;
label_return:
	if (config_ivsalloc && base == false && ret != NULL) {
		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
			chunk_dealloc(ret, size, true);
			return (NULL);
		}
	}
	if ((config_stats || config_prof) && ret != NULL) {
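		/*
		 * Track whether a new chunk high-water mark was reached, so
		 * that a heap profile dump can be triggered after chunks_mtx
		 * is dropped.
		 */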
		bool gdump;
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks = stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	assert(CHUNK_ADDR2BASE(ret) == ret);
	return (ret);
}

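/*
 * Purge an unused chunk's pages and record its extent in the given trees so
 * that the address space can be recycled, coalescing it with adjacent recorded
 * extents whenever possible.
 */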
static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, key;

	unzeroed = pages_purge(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
		if (xnode != NULL)
			base_node_dealloc(xnode);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			return;
		}
		node = xnode;
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		base_node_dealloc(prev);
	}
	malloc_mutex_unlock(&chunks_mtx);
}

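/*
 * Return an unused chunk to the appropriate recycling trees: DSS chunks are
 * always recorded (they cannot be unmapped), while mmap'ed chunks are recorded
 * only when chunk_dealloc_mmap() reports that it did not unmap them.
 */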
void
chunk_unmap(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dealloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

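/*
 * Deallocate a chunk: remove it from the ivsalloc rtree, update chunk
 * statistics, and optionally unmap (or recycle) the underlying address space
 * via chunk_unmap().
 */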
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	if (unmap)
		chunk_unmap(chunk, size);
}

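/*
 * One-time initialization of chunk bookkeeping: derive chunksize-related
 * constants from opt_lg_chunk, and create the mutex, statistics, DSS state,
 * recycling trees, and (if ivsalloc is enabled) the chunk rtree.  Returns true
 * on error.
 */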
bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (config_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk);
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

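/*
 * Fork handlers: acquire chunk-related locks before fork(), and release them
 * in the parent and child afterward so that chunk state stays consistent
 * across fork().
 */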
void
chunk_prefork(void)
{

	malloc_mutex_lock(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}