chunk.c (r286866 vs. r296221)
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

--- 318 unchanged lines hidden ---

 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;
-	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

-	/* Retained. */
-	if ((ret = chunk_recycle(arena, &chunk_hooks,
-	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
-	    new_addr, size, alignment, zero, commit, true)) != NULL)
-		return (ret);
-
	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
-	/*
-	 * mmap. Requesting an address is not implemented for
-	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
-	 */
-	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
-	    commit)) != NULL)
+	/* mmap. */
+	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+	    NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */

--- 8 unchanged lines hidden ---
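
After this change, the visible part of chunk_alloc_core() tries, in
order: the "primary" dss, then mmap (which can now honor new_addr), then
the "secondary" dss; recycling of retained chunks moves out to
chunk_alloc_wrapper() further down. The precedence consulted here is
tunable through jemalloc's documented mallctl interface. A minimal
sketch, not part of this diff, assuming jemalloc 4.x and arena 0:

	#include <jemalloc/jemalloc.h>

	static void
	prefer_dss(void)
	{
		/* Accepted values: "disabled", "primary", "secondary". */
		const char *dss = "primary";

		/* Per-arena knob; "opt.dss" holds the process-wide default. */
		mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
	}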

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
-	ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}
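
chunk_alloc_base() bypasses chunk_alloc_core() because recycled or
dss-backed chunks may already have been written to, while base
allocations back internal metadata that must start out zeroed. Anonymous
private mappings provide that guarantee. A standalone illustration, not
part of this diff:

	#include <assert.h>
	#include <sys/mman.h>

	static void *
	zeroed_region(size_t size)
	{
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);

		if (p == MAP_FAILED)
			return (NULL);
		/* Demand-zeroed: every byte reads as 0 before first write. */
		assert(((unsigned char *)p)[0] == 0);
		return (p);
	}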

--- 21 unchanged lines hidden ---

	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

-	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
-	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
-	    false, true);
+	arena = arena_get(arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

--- 10 unchanged lines hidden ---

	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}
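
For reference, the hook table consulted below is part of jemalloc 4.x's
documented public API (readable and writable via the
"arena.<i>.chunk_hooks" mallctl); chunk_alloc_default above is the
default value of its alloc member:

	typedef struct {
		chunk_alloc_t		*alloc;
		chunk_dalloc_t		*dalloc;
		chunk_commit_t		*commit;
		chunk_decommit_t	*decommit;
		chunk_purge_t		*purge;
		chunk_split_t		*split;
		chunk_merge_t		*merge;
	} chunk_hooks_t;
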
+static void *
+chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
+{
+
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+	assert(alignment != 0);
+	assert((alignment & chunksize_mask) == 0);
+
+	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
+	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
+	    commit, true));
+}
+
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);
-	ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
-	    arena->ind);
-	if (ret == NULL)
-		return (NULL);
+
+	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
+	    alignment, zero, commit);
+	if (ret == NULL) {
+		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
+		    commit, arena->ind);
+		if (ret == NULL)
+			return (NULL);
+	}
+
	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}
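
With the reordering above, a user-installed alloc hook is consulted only
once the arena's retained chunks are exhausted. A minimal sketch of
installing such a hook, assuming jemalloc 4.x and arena 0; the names
logging_chunk_alloc/install_logging_hooks are illustrative and error
handling is elided:

	#include <stdbool.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	static chunk_hooks_t orig_hooks;	/* saved original hooks */

	static void *
	logging_chunk_alloc(void *new_addr, size_t size, size_t alignment,
	    bool *zero, bool *commit, unsigned arena_ind)
	{
		/* Delegate to the saved hook so the matching dalloc/purge
		 * hooks still apply to whatever memory is returned. */
		void *ret = orig_hooks.alloc(new_addr, size, alignment, zero,
		    commit, arena_ind);

		/* Caution: stdio may allocate and so re-enter jemalloc; a
		 * production hook would log via write(2) instead. */
		fprintf(stderr, "chunk alloc %zu -> %p\n", size, ret);
		return (ret);
	}

	static void
	install_logging_hooks(void)
	{
		chunk_hooks_t hooks;
		size_t sz = sizeof(hooks);

		/* Read the current hooks, swap in our alloc, write back. */
		mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
		orig_hooks = hooks;
		hooks.alloc = logging_chunk_alloc;
		mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
		    sizeof(hooks));
	}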

static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,

--- 242 unchanged lines hidden ---

	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
-		opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
+		opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
-	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
+	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}

void
chunk_prefork(void)
{

--- 17 unchanged lines hidden ---
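
Both arithmetic tweaks in chunk_boot() are easy to check by hand:
ffs_u() returns the 1-based index of the lowest set bit, and the rtree
key width is the pointer width minus opt_lg_chunk (chunk addresses have
their low opt_lg_chunk bits clear). A standalone sketch using portable
stand-ins for ffs_u()/ZU(), assuming 64-bit pointers, the usual 64 KiB
Windows allocation granularity, and the 2 MiB default chunk size:

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	int
	main(void)
	{
		/* Windows path: ffs(0x10000) == 17, so opt_lg_chunk == 16,
		 * i.e. chunks match the 64 KiB granularity. */
		assert(ffs(0x10000) - 1 == 16);

		/* rtree sizing: LG_SIZEOF_PTR == 3 and opt_lg_chunk == 21
		 * give (1 << (3+3)) - 21 == 64 - 21 == 43 key bits. */
		assert((1 << (3 + 3)) - 21 == 43);

		return (0);
	}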