jemalloc chunk.c: diff between revision 286866 (old) and revision 296221 (new)
#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

--- 318 unchanged lines hidden ---

 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, dss_prec_t dss_prec)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* Retained. */
	if ((ret = chunk_recycle(arena, &chunk_hooks,
	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
	    new_addr, size, alignment, zero, commit, true)) != NULL)
		return (ret);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);
	/*
	 * mmap. Requesting an address is not implemented for
	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
	 */
	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
	    commit)) != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
	    NULL)
		return (ret);

	/* All strategies for allocation failed. */

--- 8 unchanged lines hidden ---
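/*
 * Illustrative sketch, not part of the diffed file: how an internal caller
 * might drive chunk_alloc_core() above.  The helper name
 * example_chunk_alloc() and the use of arena->dss_prec as the precedence
 * argument are assumptions; the point is that size and alignment must both
 * be chunk multiples, and that *zero/*commit are in/out flags reporting what
 * the chosen backend (retained, dss, or mmap) actually provided.
 */
static void *
example_chunk_alloc(arena_t *arena, size_t usize)
{
	size_t csize = CHUNK_CEILING(usize);	/* round up; assumes usize > 0 */
	bool zero = false;			/* do not require zeroed memory */
	bool commit = true;			/* do require committed pages */

	/* Falls through retained -> primary dss -> mmap -> secondary dss. */
	return (chunk_alloc_core(arena, NULL, csize, chunksize, &zero,
	    &commit, arena->dss_prec));
}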

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	commit = true;
	ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}
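/*
 * Illustrative sketch, not part of the diffed file: because
 * chunk_alloc_base() above maps fresh pages instead of recycling, a
 * (hypothetical) metadata consumer can rely on the memory reading as zero.
 * The helper name is an assumption.
 */
static void *
example_base_metadata(void)
{
	void *ret = chunk_alloc_base(chunksize);

	if (ret == NULL)
		return (NULL);
	/*
	 * No memset() needed: fresh anonymous mappings are demand-zeroed, and
	 * chunk_alloc_base() never returns recycled (possibly dirty) chunks.
	 */
	return (ret);
}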

--- 21 unchanged lines hidden ---

	return (ret);
}

static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
	    false, true);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}

--- 10 unchanged lines hidden ---

	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
	    arena->ind);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}
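/*
 * Illustrative sketch, not part of the diffed file: the shape of a
 * user-supplied allocation hook that chunk_hooks->alloc above dispatches to,
 * as installed through the "arena.<i>.chunk_hooks" mallctl.  The mmap()-based
 * body is an assumption for brevity; a real hook would normally carve chunks
 * out of its own reserve and satisfy alignment by over-allocating and
 * trimming instead of failing.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
example_chunk_alloc_hook(void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind)
{
	void *ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	(void)arena_ind;
	if (ret == MAP_FAILED)
		return (NULL);
	if ((new_addr != NULL && ret != new_addr) ||
	    ((uintptr_t)ret & (alignment - 1)) != 0) {
		/* Wrong placement or insufficient alignment: report failure. */
		munmap(ret, size);
		return (NULL);
	}
	*zero = true;	/* fresh anonymous pages read as zero */
	*commit = true;	/* and are already committed */
	return (ret);
}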

static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,

--- 242 unchanged lines hidden ---

	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}
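/*
 * Worked example, not part of the diffed file: the values chunk_boot()
 * derives on a typical 64-bit non-Windows build, assuming LG_PAGE == 12
 * (4 KiB pages) and a default lg_chunk of 21 (2 MiB chunks).  Actual values
 * depend on the build configuration and, on Windows, on the allocation
 * granularity probed above.
 */
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	size_t lg_chunk = 21;				/* assumed default */
	size_t chunksize = (size_t)1 << lg_chunk;	/* 2 MiB */
	size_t chunksize_mask = chunksize - 1;		/* 0x1fffff */
	size_t chunk_npages = chunksize >> 12;		/* 512 pages of 4 KiB */
	/* rtree key width: pointer bits not covered by the chunk offset. */
	unsigned rtree_bits = (unsigned)(sizeof(void *) * 8) -
	    (unsigned)lg_chunk;

	assert(chunksize == 0x200000);
	assert(chunksize_mask == 0x1fffff);
	assert(chunk_npages == 512);
	assert(rtree_bits == 43);	/* 64 - 21 on a 64-bit pointer build */
	return (0);
}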

void
chunk_prefork(void)
{

--- 17 unchanged lines hidden ---