diff of old revision (289900) against new revision (296221)
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static extent_node_t *
huge_node_get(const void *ptr)
{

--- 17 unchanged lines hidden ---

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
    tcache_t *tcache)
{
	size_t usize;

	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (NULL);
	}

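	/* Huge allocations are always chunk-aligned; defer to huge_palloc(). */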
	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0))
		return (NULL);	/* size_t overflow. */
	assert(usize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    size, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, size, is_zeroed, true);

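	/*
	 * Register the chunk-address-to-node mapping so that huge_node_get()
	 * can recover the node later; on failure, roll back both the chunk
	 * and the node allocations.
	 */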
	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, size);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into the arena's list of huge allocations. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

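	/*
	 * Zero the chunk only if the allocator did not already return zeroed
	 * memory; otherwise junk-fill the new allocation when junk filling
	 * is enabled.
	 */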
	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, size);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, size);

	return (ret);
}

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void

--- 165 unchanged lines hidden ---

		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

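/*
 * Try to satisfy a resize request without moving the allocation: returns
 * false on success, true if the caller must allocate new space and copy.
 */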
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
			return (false);
		/*
		 * Expanding to usize_max failed, so try again with usize_min
		 * if that would still grow the allocation; as above,
		 * huge_ralloc_no_move_expand() returns false on success.
		 */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && !huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero))
			return (false);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
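	/* In-place resize failed above; the allocation must be moved. */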
	return (true);
}

static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{


--- 4 unchanged lines hidden ---


void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,

--- 19 unchanged lines hidden ---

	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

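	/* Junk-fill and release the chunk, then free the tracking node. */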
	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true);
}

arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

--- 52 unchanged lines hidden ---