--- huge.c (289900)
+++ huge.c (296221)
 #define JEMALLOC_HUGE_C_
 #include "jemalloc/internal/jemalloc_internal.h"

 /******************************************************************************/

 static extent_node_t *
 huge_node_get(const void *ptr)
 {

--- 17 unchanged lines hidden ---

 static void
 huge_node_unset(const void *ptr, const extent_node_t *node)
 {

     chunk_deregister(ptr, node);
 }

 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
     tcache_t *tcache)
 {
-    size_t usize;

-    usize = s2u(size);
-    if (usize == 0) {
-        /* size_t overflow. */
-        return (NULL);
-    }
+    assert(usize == s2u(usize));

     return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }

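Note: the s2u() overflow check moves out of huge_malloc() in this revision; the new assert only verifies that the incoming usize is already a usable size class, making callers responsible for the rounding. A minimal sketch of the caller-side pattern this implies; the wrapper name is hypothetical, while s2u(), HUGE_MAXCLASS, unlikely(), and the huge_malloc() signature are taken from the diff:

    /* Hypothetical caller: round the request once, reject overflow,
     * then pass the rounded usize down so huge_malloc() can
     * assert(usize == s2u(usize)). */
    static void *
    huge_malloc_checked(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
        tcache_t *tcache)
    {
        size_t usize = s2u(size);    /* s2u() returns 0 on size_t overflow. */

        if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
            return (NULL);
        return (huge_malloc(tsd, arena, usize, zero, tcache));
    }
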
 void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache)
 {
     void *ret;
-    size_t usize;
+    size_t ausize;
     extent_node_t *node;
     bool is_zeroed;

     /* Allocate one or more contiguous chunks for this request. */

-    usize = sa2u(size, alignment);
-    if (unlikely(usize == 0))
+    ausize = sa2u(usize, alignment);
+    if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
         return (NULL);
-    assert(usize >= chunksize);
+    assert(ausize >= chunksize);

     /* Allocate an extent node with which to track the chunk. */
     node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
         CACHELINE, false, tcache, true, arena);
     if (node == NULL)
         return (NULL);

     /*
      * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
      * it is possible to make correct junk/zero fill decisions below.
      */
     is_zeroed = zero;
     arena = arena_choose(tsd, arena);
     if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
-        size, alignment, &is_zeroed)) == NULL) {
-        idalloctm(tsd, node, tcache, true);
+        usize, alignment, &is_zeroed)) == NULL) {
+        idalloctm(tsd, node, tcache, true, true);
         return (NULL);
     }

-    extent_node_init(node, arena, ret, size, is_zeroed, true);
+    extent_node_init(node, arena, ret, usize, is_zeroed, true);

     if (huge_node_set(ret, node)) {
-        arena_chunk_dalloc_huge(arena, ret, size);
-        idalloctm(tsd, node, tcache, true);
+        arena_chunk_dalloc_huge(arena, ret, usize);
+        idalloctm(tsd, node, tcache, true, true);
         return (NULL);
     }

     /* Insert node into huge. */
     malloc_mutex_lock(&arena->huge_mtx);
     ql_elm_new(node, ql_link);
     ql_tail_insert(&arena->huge, node, ql_link);
     malloc_mutex_unlock(&arena->huge_mtx);

     if (zero || (config_fill && unlikely(opt_zero))) {
         if (!is_zeroed)
-            memset(ret, 0, size);
+            memset(ret, 0, usize);
     } else if (config_fill && unlikely(opt_junk_alloc))
-        memset(ret, 0xa5, size);
+        memset(ret, 0xa5, usize);

+    arena_decay_tick(tsd, arena);
     return (ret);
 }
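Note: arena_decay_tick(tsd, arena) is new in this revision and recurs at the end of every huge allocation, in-place reallocation, and deallocation path in this diff. It advances a per-arena operation counter that drives time-based purging of unused dirty pages. A simplified model of such a tick counter, with purely illustrative names; this sketches the idea, not jemalloc's actual implementation:

    /* Simplified decay-tick model: count operations per arena and
     * check the purge deadline every TICKS_PER_CHECK ticks. */
    #define TICKS_PER_CHECK 1000

    typedef struct {
        unsigned ticks_left;
    } model_arena_t;

    static void model_purge_expired(model_arena_t *arena);  /* hypothetical */

    static void
    model_decay_tick(model_arena_t *arena)
    {
        if (arena->ticks_left > 0) {
            arena->ticks_left--;
            return;
        }
        arena->ticks_left = TICKS_PER_CHECK;
        model_purge_expired(arena);  /* purge pages past their deadline */
    }
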

 #ifdef JEMALLOC_JET
 #undef huge_dalloc_junk
 #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
 #endif
 static void

--- 165 unchanged lines hidden ---

         memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
             oldsize);
     }

     return (false);
 }

 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
     size_t usize_max, bool zero)
 {

     assert(s2u(oldsize) == oldsize);
+    /* The following should have been caught by callers. */
+    assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

     /* Both allocations must be huge to avoid a move. */
     if (oldsize < chunksize || usize_max < chunksize)
         return (true);

     if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
         /* Attempt to expand the allocation in-place. */
-        if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+        if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+            zero)) {
+            arena_decay_tick(tsd, huge_aalloc(ptr));
             return (false);
+        }
         /* Try again, this time with usize_min. */
         if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
             CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
-            oldsize, usize_min, zero))
+            oldsize, usize_min, zero)) {
+            arena_decay_tick(tsd, huge_aalloc(ptr));
             return (false);
+        }
     }

     /*
      * Avoid moving the allocation if the existing chunk size accommodates
      * the new size.
      */
     if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
         && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
         huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
             zero);
+        arena_decay_tick(tsd, huge_aalloc(ptr));
         return (false);
     }

     /* Attempt to shrink the allocation in-place. */
-    if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
-        return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+    if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+        if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
+            arena_decay_tick(tsd, huge_aalloc(ptr));
+            return (false);
+        }
+    }
     return (true);
 }

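Note: every branch above keys off CHUNK_CEILING(), the size rounded up to a whole number of chunks: expansion when the new ceiling is larger, the "similar" no-copy path when the old ceiling already covers the requested range, shrinking when the ceiling gets smaller. A worked example, assuming 2 MiB chunks purely for illustration (the real chunksize is configuration-dependent):

    /* EX_CEIL models CHUNK_CEILING() for an assumed 2 MiB chunk size. */
    #define EX_CHUNK    ((size_t)2 << 20)
    #define EX_CEIL(s)  (((s) + EX_CHUNK - 1) & ~(EX_CHUNK - 1))

    /* oldsize   = 5 MiB   -> EX_CEIL(oldsize)   = 6 MiB
     * usize_max = 5.5 MiB -> EX_CEIL(usize_max) = 6 MiB
     *     Same chunk count: huge_ralloc_no_move_similar() applies, no copy.
     * usize_max = 7 MiB   -> EX_CEIL(usize_max) = 8 MiB
     *     More chunks needed: in-place expansion is attempted first.
     * usize_max = 3 MiB   -> EX_CEIL(usize_max) = 4 MiB
     *     Fewer chunks needed: in-place shrinking is attempted. */
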
 static void *
 huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache)
 {

--- 4 unchanged lines hidden ---

 void *
 huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache)
 {
     void *ret;
     size_t copysize;

+    /* The following should have been caught by callers. */
+    assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
     /* Try to avoid moving the allocation. */
-    if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
+    if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
         return (ptr);

     /*
      * usize and oldsize are different enough that we need to use a
      * different size class.  In that case, fall back to allocating new
      * space and copying.
      */
     ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,

--- 19 unchanged lines hidden ---

     malloc_mutex_lock(&arena->huge_mtx);
     ql_remove(&arena->huge, node, ql_link);
     malloc_mutex_unlock(&arena->huge_mtx);

     huge_dalloc_junk(extent_node_addr_get(node),
         extent_node_size_get(node));
     arena_chunk_dalloc_huge(extent_node_arena_get(node),
         extent_node_addr_get(node), extent_node_size_get(node));
-    idalloctm(tsd, node, tcache, true);
+    idalloctm(tsd, node, tcache, true, true);
+
+    arena_decay_tick(tsd, arena);
 }

 arena_t *
 huge_aalloc(const void *ptr)
 {

     return (extent_node_arena_get(huge_node_get(ptr)));
 }

--- 52 unchanged lines hidden ---
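Note: huge_aalloc() above is also how the new decay ticks in huge_ralloc_no_move() locate the owning arena: the chunk's extent node, registered by huge_node_set() during allocation, carries the arena pointer. Expanded using only functions visible in this file:

    /* arena_decay_tick(tsd, huge_aalloc(ptr)) is equivalent to: */
    arena_decay_tick(tsd, extent_node_arena_get(huge_node_get(ptr)));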