Lines Matching refs:arena (each entry shows the source line number followed by the matching source line)

40 static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
42 static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
44 static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
46 static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
204 arena_runs_avail_get(arena_t *arena, szind_t ind)
210 return (&arena->runs_avail[ind - runs_avail_bias]);
214 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
221 arena_run_heap_insert(arena_runs_avail_get(arena, ind),
226 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
233 arena_run_heap_remove(arena_runs_avail_get(arena, ind),
238 arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
251 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
252 arena->ndirty += npages;
256 arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
269 assert(arena->ndirty >= npages);
270 arena->ndirty -= npages;
281 arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
286 extent_node_dirty_insert(node, &arena->runs_dirty,
287 &arena->chunks_cache);
288 arena->ndirty += arena_chunk_dirty_npages(node);
293 arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
298 assert(arena->ndirty >= arena_chunk_dirty_npages(node));
299 arena->ndirty -= arena_chunk_dirty_npages(node);
379 arena_nactive_add(arena_t *arena, size_t add_pages)
383 size_t cactive_add = CHUNK_CEILING((arena->nactive +
384 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
389 arena->nactive += add_pages;
393 arena_nactive_sub(arena_t *arena, size_t sub_pages)
397 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
398 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
402 arena->nactive -= sub_pages;
406 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
420 arena_avail_remove(arena, chunk, run_ind, total_pages);
422 arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
423 arena_nactive_add(arena, need_pages);
440 arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
443 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
448 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
464 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
465 run_ind << LG_PAGE, size, arena->ind))
469 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
521 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
524 return (arena_run_split_large_helper(arena, run, size, true, zero));
528 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
531 return (arena_run_split_large_helper(arena, run, size, false, zero));
535 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
552 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
553 run_ind << LG_PAGE, size, arena->ind))
556 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
573 arena_chunk_init_spare(arena_t *arena)
577 assert(arena->spare != NULL);
579 chunk = arena->spare;
580 arena->spare = NULL;
595 arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
601 * arena chunks. Arbitrarily mark them as committed. The commit state
605 extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
611 arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
616 malloc_mutex_unlock(tsdn, &arena->lock);
618 chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
623 LG_PAGE, arena->ind)) {
624 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
629 if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
633 LG_PAGE, arena->ind);
635 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
640 malloc_mutex_lock(tsdn, &arena->lock);
645 arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
651 chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
654 if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
655 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
662 chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
667 arena->stats.mapped += chunksize;
668 arena->stats.metadata_mapped += (map_bias << LG_PAGE);
675 arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
681 assert(arena->spare == NULL);
685 chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
730 arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
734 if (arena->spare != NULL)
735 chunk = arena_chunk_init_spare(arena);
737 chunk = arena_chunk_init_hard(tsdn, arena);
743 ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
744 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
750 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
765 chunk_hooks = chunk_hooks_get(tsdn, arena);
767 arena->ind);
770 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
774 arena->stats.mapped -= chunksize;
775 arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
780 arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
783 assert(arena->spare != spare);
786 arena_run_dirty_remove(arena, spare, map_bias,
790 arena_chunk_discard(tsdn, arena, spare);
794 arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
809 /* Remove run from runs_avail, so that the arena does not use it. */
810 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
812 ql_remove(&arena->achunks, &chunk->node, ql_link);
813 spare = arena->spare;
814 arena->spare = chunk;
816 arena_spare_discard(tsdn, arena, spare);
820 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
826 arena->stats.nmalloc_huge++;
827 arena->stats.allocated_huge += usize;
828 arena->stats.hstats[index].nmalloc++;
829 arena->stats.hstats[index].curhchunks++;
833 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
839 arena->stats.nmalloc_huge--;
840 arena->stats.allocated_huge -= usize;
841 arena->stats.hstats[index].nmalloc--;
842 arena->stats.hstats[index].curhchunks--;
846 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
852 arena->stats.ndalloc_huge++;
853 arena->stats.allocated_huge -= usize;
854 arena->stats.hstats[index].ndalloc++;
855 arena->stats.hstats[index].curhchunks--;
859 arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
865 arena->stats.ndalloc_huge++;
866 arena->stats.hstats[index].ndalloc--;
870 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
876 arena->stats.ndalloc_huge--;
877 arena->stats.allocated_huge += usize;
878 arena->stats.hstats[index].ndalloc--;
879 arena->stats.hstats[index].curhchunks++;
883 arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
886 arena_huge_dalloc_stats_update(arena, oldsize);
887 arena_huge_malloc_stats_update(arena, usize);
891 arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
895 arena_huge_dalloc_stats_update_undo(arena, oldsize);
896 arena_huge_malloc_stats_update_undo(arena, usize);
900 arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
904 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
905 node = ql_last(&arena->node_cache, ql_link);
907 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
910 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
911 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
916 arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
919 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
921 ql_tail_insert(&arena->node_cache, node, ql_link);
922 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
926 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
933 ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
937 malloc_mutex_lock(tsdn, &arena->lock);
939 arena_huge_malloc_stats_update_undo(arena, usize);
940 arena->stats.mapped -= usize;
942 arena_nactive_sub(arena, usize >> LG_PAGE);
943 malloc_mutex_unlock(tsdn, &arena->lock);
950 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
957 malloc_mutex_lock(tsdn, &arena->lock);
961 arena_huge_malloc_stats_update(arena, usize);
962 arena->stats.mapped += usize;
964 arena_nactive_add(arena, usize >> LG_PAGE);
966 ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
968 malloc_mutex_unlock(tsdn, &arena->lock);
970 ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
978 arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
984 malloc_mutex_lock(tsdn, &arena->lock);
986 arena_huge_dalloc_stats_update(arena, usize);
987 arena->stats.mapped -= usize;
989 arena_nactive_sub(arena, usize >> LG_PAGE);
991 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
992 malloc_mutex_unlock(tsdn, &arena->lock);
996 arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
1003 malloc_mutex_lock(tsdn, &arena->lock);
1005 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1007 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
1009 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
1010 malloc_mutex_unlock(tsdn, &arena->lock);
1014 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
1020 malloc_mutex_lock(tsdn, &arena->lock);
1022 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1024 arena->stats.mapped -= cdiff;
1026 arena_nactive_sub(arena, udiff >> LG_PAGE);
1033 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1036 malloc_mutex_unlock(tsdn, &arena->lock);
1040 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
1047 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1051 malloc_mutex_lock(tsdn, &arena->lock);
1053 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1055 arena->stats.mapped -= cdiff;
1057 arena_nactive_sub(arena, udiff >> LG_PAGE);
1058 malloc_mutex_unlock(tsdn, &arena->lock);
1060 cdiff, true, arena->ind)) {
1061 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1069 arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
1073 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1078 malloc_mutex_lock(tsdn, &arena->lock);
1082 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1083 arena->stats.mapped += cdiff;
1085 arena_nactive_add(arena, udiff >> LG_PAGE);
1087 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1089 malloc_mutex_unlock(tsdn, &arena->lock);
1091 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
1095 cdiff, true, arena->ind)) {
1096 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1110 arena_run_first_best_fit(arena_t *arena, size_t size)
1117 arena_runs_avail_get(arena, i));
1126 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
1128 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
1130 if (arena_run_split_large(arena, run, size, zero))
1137 arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
1145 /* Search the arena's chunks for the lowest best fit. */
1146 run = arena_run_alloc_large_helper(arena, size, zero);
1153 chunk = arena_chunk_alloc(tsdn, arena);
1156 if (arena_run_split_large(arena, run, size, zero))
1163 * sufficient memory available while this one dropped arena->lock in
1166 return (arena_run_alloc_large_helper(arena, size, zero));
1170 arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1172 arena_run_t *run = arena_run_first_best_fit(arena, size);
1174 if (arena_run_split_small(arena, run, size, binind))
1181 arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
1190 /* Search the arena's chunks for the lowest best fit. */
1191 run = arena_run_alloc_small_helper(arena, size, binind);
1198 chunk = arena_chunk_alloc(tsdn, arena);
1201 if (arena_run_split_small(arena, run, size, binind))
1208 * sufficient memory available while this one dropped arena->lock in
1211 return (arena_run_alloc_small_helper(arena, size, binind));
1223 arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
1227 malloc_mutex_lock(tsdn, &arena->lock);
1228 lg_dirty_mult = arena->lg_dirty_mult;
1229 malloc_mutex_unlock(tsdn, &arena->lock);
1235 arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
1241 malloc_mutex_lock(tsdn, &arena->lock);
1242 arena->lg_dirty_mult = lg_dirty_mult;
1243 arena_maybe_purge(tsdn, arena);
1244 malloc_mutex_unlock(tsdn, &arena->lock);
1250 arena_decay_deadline_init(arena_t *arena)
1259 nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
1260 nstime_add(&arena->decay_deadline, &arena->decay_interval);
1261 if (arena->decay_time > 0) {
1264 nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
1265 nstime_ns(&arena->decay_interval)));
1266 nstime_add(&arena->decay_deadline, &jitter);
1271 arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
1276 return (nstime_compare(&arena->decay_deadline, time) <= 0);
1280 arena_decay_backlog_npages_limit(const arena_t *arena)
1301 sum += arena->decay_backlog[i] * h_steps[i];
1308 arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
1315 assert(arena_decay_deadline_reached(arena, time));
1318 nstime_subtract(&delta, &arena->decay_epoch);
1319 nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
1323 nstime_copy(&delta, &arena->decay_interval);
1325 nstime_add(&arena->decay_epoch, &delta);
1328 arena_decay_deadline_init(arena);
1332 memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1339 memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
1342 memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
1346 ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
1347 arena->decay_ndirty : 0;
1348 arena->decay_ndirty = arena->ndirty;
1349 arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1350 arena->decay_backlog_npages_limit =
1351 arena_decay_backlog_npages_limit(arena);
1355 arena_decay_npages_limit(arena_t *arena)
1361 npages_limit = arena->decay_backlog_npages_limit;
1364 if (arena->ndirty > arena->decay_ndirty)
1365 npages_limit += arena->ndirty - arena->decay_ndirty;
1371 arena_decay_init(arena_t *arena, ssize_t decay_time)
1374 arena->decay_time = decay_time;
1376 nstime_init2(&arena->decay_interval, decay_time, 0);
1377 nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
1380 nstime_init(&arena->decay_epoch, 0);
1381 nstime_update(&arena->decay_epoch);
1382 arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
1383 arena_decay_deadline_init(arena);
1384 arena->decay_ndirty = arena->ndirty;
1385 arena->decay_backlog_npages_limit = 0;
1386 memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1401 arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
1405 malloc_mutex_lock(tsdn, &arena->lock);
1406 decay_time = arena->decay_time;
1407 malloc_mutex_unlock(tsdn, &arena->lock);
1413 arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
1419 malloc_mutex_lock(tsdn, &arena->lock);
1426 * arbitrary change during initial arena configuration.
1428 arena_decay_init(arena, decay_time);
1429 arena_maybe_purge(tsdn, arena);
1430 malloc_mutex_unlock(tsdn, &arena->lock);
1436 arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
1442 if (arena->lg_dirty_mult < 0)
1450 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1457 if (arena->ndirty <= threshold)
1459 arena_purge_to_limit(tsdn, arena, threshold);
1464 arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
1472 if (arena->decay_time <= 0) {
1473 if (arena->decay_time == 0)
1474 arena_purge_to_limit(tsdn, arena, 0);
1478 nstime_copy(&time, &arena->decay_epoch);
1481 nstime_copy(&time, &arena->decay_deadline);
1484 if (arena_decay_deadline_reached(arena, &time))
1485 arena_decay_epoch_advance(arena, &time);
1487 ndirty_limit = arena_decay_npages_limit(arena);
1493 if (arena->ndirty <= ndirty_limit)
1495 arena_purge_to_limit(tsdn, arena, ndirty_limit);
1499 arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
1503 if (arena->purging)
1507 arena_maybe_purge_ratio(tsdn, arena);
1509 arena_maybe_purge_decay(tsdn, arena);
1513 arena_dirty_count(arena_t *arena)
1519 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1520 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1521 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1547 arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1556 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1557 chunkselm = qr_next(&arena->chunks_cache, cc_link);
1558 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1568 if (opt_purge == purge_mode_decay && arena->ndirty -
1578 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
1600 if (opt_purge == purge_mode_decay && arena->ndirty -
1612 if (chunk == arena->spare)
1613 arena_chunk_alloc(tsdn, arena);
1616 arena_run_split_large(arena, run, run_size, false);
1628 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1637 arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1649 malloc_mutex_unlock(tsdn, &arena->lock);
1683 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1688 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
1702 * map modification is safe even though the arena mutex
1719 malloc_mutex_lock(tsdn, &arena->lock);
1722 arena->stats.nmadvise += nmadvise;
1723 arena->stats.purged += npurged;
1730 arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1750 arena_node_dalloc(tsdn, arena, chunkselm);
1752 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
1764 arena_run_dalloc(tsdn, arena, run, false, true,
1774 * (arena->ndirty <= ndirty_limit)
1777 * (arena->ndirty >= ndirty_limit)
1780 arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
1782 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1787 arena->purging = true;
1794 size_t ndirty = arena_dirty_count(arena);
1795 assert(ndirty == arena->ndirty);
1797 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1798 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1803 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
1807 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
1810 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
1814 arena->stats.npurge++;
1817 arena->purging = false;
1821 arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
1824 malloc_mutex_lock(tsdn, &arena->lock);
1826 arena_purge_to_limit(tsdn, arena, 0);
1828 arena_maybe_purge(tsdn, arena);
1829 malloc_mutex_unlock(tsdn, &arena->lock);
1833 arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1873 arena_reset(tsd_t *tsd, arena_t *arena)
1880 * no concurrent operations are happening in this arena, but there are
1894 ql_foreach(node, &arena->achunks, ql_link) {
1895 arena_achunk_prof_reset(tsd, arena,
1903 arena->stats.lstats[i].curruns = 0;
1907 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1908 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1909 ql_last(&arena->huge, ql_link)) {
1913 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1920 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1923 arena_huge_reset_stats_cancel(arena, usize);
1925 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1927 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
1931 arena_bin_t *bin = &arena->bins[i];
1946 qr_new(&arena->runs_dirty, rd_link);
1947 for (node = qr_next(&arena->chunks_cache, cc_link);
1948 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1950 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1954 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1955 ql_last(&arena->achunks, ql_link)) {
1956 ql_remove(&arena->achunks, node, ql_link);
1957 arena_chunk_discard(tsd_tsdn(tsd), arena,
1962 if (arena->spare != NULL) {
1963 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
1964 arena->spare = NULL;
1967 assert(!arena->purging);
1968 arena->nactive = 0;
1971 arena_run_heap_new(&arena->runs_avail[i]);
1973 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
1977 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
2005 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
2012 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
2044 arena_avail_remove(arena, chunk, run_ind, prun_pages);
2051 arena_run_dirty_remove(arena, chunk, run_ind,
2069 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2090 arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
2102 size = arena_run_size_get(arena, chunk, run, run_ind);
2104 arena_nactive_sub(arena, run_pages);
2132 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2142 arena_avail_insert(arena, chunk, run_ind, run_pages);
2145 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
2151 arena_chunk_dalloc(tsdn, arena, chunk);
2162 arena_maybe_purge(tsdn, arena);
2166 arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2202 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
2207 arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2247 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
2274 arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2286 binind = arena_bin_index(arena, bin);
2292 malloc_mutex_lock(tsdn, &arena->lock);
2293 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
2300 malloc_mutex_unlock(tsdn, &arena->lock);
2325 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2331 binind = arena_bin_index(arena, bin);
2334 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
2356 arena_dalloc_bin_run(tsdn, arena, chunk, run,
2359 arena_bin_lower_run(arena, chunk, run, bin);
2375 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
2383 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2385 bin = &arena->bins[binind];
2394 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
2423 arena_decay_tick(tsdn, arena);
2535 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2543 bin = &arena->bins[binind];
2550 ret = arena_bin_malloc_hard(tsdn, arena, bin);
2563 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2584 arena_decay_tick(tsdn, arena);
2589 arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2600 malloc_mutex_lock(tsdn, &arena->lock);
2609 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
2613 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
2615 malloc_mutex_unlock(tsdn, &arena->lock);
2624 arena->stats.nmalloc_large++;
2625 arena->stats.nrequests_large++;
2626 arena->stats.allocated_large += usize;
2627 arena->stats.lstats[index].nmalloc++;
2628 arena->stats.lstats[index].nrequests++;
2629 arena->stats.lstats[index].curruns++;
2632 idump = arena_prof_accum_locked(arena, usize);
2633 malloc_mutex_unlock(tsdn, &arena->lock);
2646 arena_decay_tick(tsdn, arena);
2651 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
2655 assert(!tsdn_null(tsdn) || arena != NULL);
2658 arena = arena_choose(tsdn_tsd(tsdn), arena);
2659 if (unlikely(arena == NULL))
2663 return (arena_malloc_small(tsdn, arena, ind, zero));
2665 return (arena_malloc_large(tsdn, arena, ind, zero));
2666 return (huge_malloc(tsdn, arena, index2size(ind), zero));
2671 arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2681 assert(!tsdn_null(tsdn) || arena != NULL);
2685 arena = arena_choose(tsdn_tsd(tsdn), arena);
2686 if (unlikely(arena == NULL))
2692 malloc_mutex_lock(tsdn, &arena->lock);
2693 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
2695 malloc_mutex_unlock(tsdn, &arena->lock);
2715 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
2719 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
2722 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2730 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2731 malloc_mutex_unlock(tsdn, &arena->lock);
2739 arena->stats.nmalloc_large++;
2740 arena->stats.nrequests_large++;
2741 arena->stats.allocated_large += usize;
2742 arena->stats.lstats[index].nmalloc++;
2743 arena->stats.lstats[index].nrequests++;
2744 arena->stats.lstats[index].curruns++;
2746 malloc_mutex_unlock(tsdn, &arena->lock);
2754 arena_decay_tick(tsdn, arena);
2759 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2767 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2776 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2782 ret = arena_palloc_large(tsdn, arena, usize, alignment,
2785 ret = huge_malloc(tsdn, arena, usize, zero);
2787 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
2845 arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2853 malloc_mutex_lock(tsdn, &arena->lock);
2854 arena_run_dalloc(tsdn, arena, run, true, false, false);
2855 malloc_mutex_unlock(tsdn, &arena->lock);
2863 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2884 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2897 bin = &arena->bins[binind];
2906 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
2908 arena_bin_lower_run(arena, chunk, run, bin);
2917 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2921 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
2925 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2934 bin = &arena->bins[run->binind];
2936 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2941 arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2952 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2953 arena_decay_tick(tsdn, arena);
2975 arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
2992 arena->stats.ndalloc_large++;
2993 arena->stats.allocated_large -= usize;
2994 arena->stats.lstats[index].ndalloc++;
2995 arena->stats.lstats[index].curruns--;
2999 arena_run_dalloc(tsdn, arena, run, true, false, false);
3003 arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
3007 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
3011 arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3015 malloc_mutex_lock(tsdn, &arena->lock);
3016 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3017 malloc_mutex_unlock(tsdn, &arena->lock);
3018 arena_decay_tick(tsdn, arena);
3022 arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3036 malloc_mutex_lock(tsdn, &arena->lock);
3037 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
3043 arena->stats.ndalloc_large++;
3044 arena->stats.allocated_large -= oldsize;
3045 arena->stats.lstats[oldindex].ndalloc++;
3046 arena->stats.lstats[oldindex].curruns--;
3048 arena->stats.nmalloc_large++;
3049 arena->stats.nrequests_large++;
3050 arena->stats.allocated_large += size;
3051 arena->stats.lstats[index].nmalloc++;
3052 arena->stats.lstats[index].nrequests++;
3053 arena->stats.lstats[index].curruns++;
3055 malloc_mutex_unlock(tsdn, &arena->lock);
3059 arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3070 malloc_mutex_lock(tsdn, &arena->lock);
3094 if (arena_run_split_large(arena, run, splitsize, zero))
3138 arena->stats.ndalloc_large++;
3139 arena->stats.allocated_large -= oldsize;
3140 arena->stats.lstats[oldindex].ndalloc++;
3141 arena->stats.lstats[oldindex].curruns--;
3143 arena->stats.nmalloc_large++;
3144 arena->stats.nrequests_large++;
3145 arena->stats.allocated_large += size;
3146 arena->stats.lstats[index].nmalloc++;
3147 arena->stats.lstats[index].nrequests++;
3148 arena->stats.lstats[index].curruns++;
3150 malloc_mutex_unlock(tsdn, &arena->lock);
3154 malloc_mutex_unlock(tsdn, &arena->lock);
3187 arena_t *arena;
3195 arena = extent_node_arena_get(&chunk->node);
3198 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
3216 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
3266 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
3271 return (arena_malloc(tsdn, arena, usize, size2index(usize),
3276 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
3280 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3303 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3318 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3325 arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
3329 malloc_mutex_lock(tsdn, &arena->lock);
3330 ret = arena->dss_prec;
3331 malloc_mutex_unlock(tsdn, &arena->lock);
3336 arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
3341 malloc_mutex_lock(tsdn, &arena->lock);
3342 arena->dss_prec = dss_prec;
3343 malloc_mutex_unlock(tsdn, &arena->lock);
3386 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3391 *nthreads += arena_nthreads_get(arena, false);
3392 *dss = dss_prec_names[arena->dss_prec];
3393 *lg_dirty_mult = arena->lg_dirty_mult;
3394 *decay_time = arena->decay_time;
3395 *nactive += arena->nactive;
3396 *ndirty += arena->ndirty;
3400 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3405 malloc_mutex_lock(tsdn, &arena->lock);
3406 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3408 malloc_mutex_unlock(tsdn, &arena->lock);
3412 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3422 malloc_mutex_lock(tsdn, &arena->lock);
3423 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3426 astats->mapped += arena->stats.mapped;
3427 astats->retained += arena->stats.retained;
3428 astats->npurge += arena->stats.npurge;
3429 astats->nmadvise += arena->stats.nmadvise;
3430 astats->purged += arena->stats.purged;
3431 astats->metadata_mapped += arena->stats.metadata_mapped;
3432 astats->metadata_allocated += arena_metadata_allocated_get(arena);
3433 astats->allocated_large += arena->stats.allocated_large;
3434 astats->nmalloc_large += arena->stats.nmalloc_large;
3435 astats->ndalloc_large += arena->stats.ndalloc_large;
3436 astats->nrequests_large += arena->stats.nrequests_large;
3437 astats->allocated_huge += arena->stats.allocated_huge;
3438 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3439 astats->ndalloc_huge += arena->stats.ndalloc_huge;
3442 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3443 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3444 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3445 lstats[i].curruns += arena->stats.lstats[i].curruns;
3449 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3450 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3451 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3453 malloc_mutex_unlock(tsdn, &arena->lock);
3456 arena_bin_t *bin = &arena->bins[i];
3475 arena_nthreads_get(arena_t *arena, bool internal)
3478 return (atomic_read_u(&arena->nthreads[internal]));
3482 arena_nthreads_inc(arena_t *arena, bool internal)
3485 atomic_add_u(&arena->nthreads[internal], 1);
3489 arena_nthreads_dec(arena_t *arena, bool internal)
3492 atomic_sub_u(&arena->nthreads[internal], 1);
3498 arena_t *arena;
3502 /* Compute arena size to incorporate sufficient runs_avail elements. */
3506 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3510 arena = (arena_t *)base_alloc(tsdn,
3515 arena = (arena_t *)base_alloc(tsdn, arena_size);
3516 if (arena == NULL)
3519 arena->ind = ind;
3520 arena->nthreads[0] = arena->nthreads[1] = 0;
3521 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
3525 memset(&arena->stats, 0, sizeof(arena_stats_t));
3526 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3528 memset(arena->stats.lstats, 0, nlclasses *
3530 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3533 memset(arena->stats.hstats, 0, nhclasses *
3536 ql_new(&arena->tcache_ql);
3540 arena->prof_accumbytes = 0;
3544 * A nondeterministic seed based on the address of arena reduces
3550 arena->offset_state = config_debug ? ind :
3551 (uint64_t)(uintptr_t)arena;
3554 arena->dss_prec = chunk_dss_prec_get(tsdn);
3556 ql_new(&arena->achunks);
3558 arena->spare = NULL;
3560 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
3561 arena->purging = false;
3562 arena->nactive = 0;
3563 arena->ndirty = 0;
3566 arena_run_heap_new(&arena->runs_avail[i]);
3567 qr_new(&arena->runs_dirty, rd_link);
3568 qr_new(&arena->chunks_cache, cc_link);
3571 arena_decay_init(arena, arena_decay_time_default_get());
3573 ql_new(&arena->huge);
3574 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3578 extent_tree_szad_new(&arena->chunks_szad_cached);
3579 extent_tree_ad_new(&arena->chunks_ad_cached);
3580 extent_tree_szad_new(&arena->chunks_szad_retained);
3581 extent_tree_ad_new(&arena->chunks_ad_retained);
3582 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3585 ql_new(&arena->node_cache);
3586 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3590 arena->chunk_hooks = chunk_hooks_default;
3594 arena_bin_t *bin = &arena->bins[i];
3604 return (arena);
3680 * Make sure that the run will fit within an arena chunk.
3837 arena_prefork0(tsdn_t *tsdn, arena_t *arena)
3840 malloc_mutex_prefork(tsdn, &arena->lock);
3844 arena_prefork1(tsdn_t *tsdn, arena_t *arena)
3847 malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
3851 arena_prefork2(tsdn_t *tsdn, arena_t *arena)
3854 malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
3858 arena_prefork3(tsdn_t *tsdn, arena_t *arena)
3863 malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3864 malloc_mutex_prefork(tsdn, &arena->huge_mtx);
3868 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
3872 malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
3874 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3875 malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3876 malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3877 malloc_mutex_postfork_parent(tsdn, &arena->lock);
3881 arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
3885 malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
3887 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3888 malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3889 malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3890 malloc_mutex_postfork_child(tsdn, &arena->lock);
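
The ratio-based purge decision is visible above at source lines 1436-1459 (arena_maybe_purge_ratio): purging is disabled while lg_dirty_mult is negative, and otherwise the arena purges down to nactive >> lg_dirty_mult dirty pages once ndirty exceeds that threshold. The following is a minimal, self-contained C sketch of that check; the types and function names are illustrative stand-ins rather than jemalloc's internal API, and only the arithmetic mirrors the excerpts above.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative stand-in for the arena fields the ratio test reads. */
	typedef struct {
		int    lg_dirty_mult;	/* < 0 disables ratio-based purging */
		size_t nactive;		/* pages backing live allocations */
		size_t ndirty;		/* dirty pages not yet purged */
	} arena_counts_t;

	/*
	 * Mirrors the check in arena_maybe_purge_ratio(): once ndirty exceeds
	 * (nactive >> lg_dirty_mult), purge down to that threshold.  Returns
	 * true and sets *limit when a purge pass would run.
	 */
	static bool
	purge_ratio_check(const arena_counts_t *a, size_t *limit)
	{
		if (a->lg_dirty_mult < 0)
			return false;
		size_t threshold = a->nactive >> a->lg_dirty_mult;
		if (a->ndirty <= threshold)
			return false;
		*limit = threshold;	/* target passed to the purge pass */
		return true;
	}

	int
	main(void)
	{
		arena_counts_t a = { 3, 4096, 600 };	/* 1:8 dirty:active ratio */
		size_t limit;
		if (purge_ratio_check(&a, &limit))
			printf("purge down to %zu dirty pages\n", limit);
		else
			printf("no purge needed\n");
		return 0;
	}

With lg_dirty_mult = 3 the threshold is 4096 >> 3 = 512 pages, so 600 dirty pages triggers a purge down to 512.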
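Similarly, the active-page accounting at source lines 379-402 (arena_nactive_add / arena_nactive_sub) tracks how many chunk-sized bytes the active pages span by comparing chunk-ceilinged byte counts before and after the update. The sketch below reproduces that arithmetic with assumed constants (4 KiB pages, 2 MiB chunks); in jemalloc, LG_PAGE and the chunk size come from the build configuration, and the CHUNK_CEILING macro here is a stand-in for the real one.

	#include <stddef.h>
	#include <stdio.h>

	/* Assumed for this sketch: 4 KiB pages, 2 MiB chunks (both are
	 * configurable in jemalloc; these are not authoritative values). */
	#define LG_PAGE		12
	#define CHUNKSIZE	((size_t)1 << 21)
	#define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

	/*
	 * Same arithmetic as arena_nactive_add(): the growth in active chunk
	 * bytes is the difference of the chunk-ceilinged byte counts before
	 * and after adding add_pages pages.
	 */
	static size_t
	cactive_add_bytes(size_t nactive, size_t add_pages)
	{
		return CHUNK_CEILING((nactive + add_pages) << LG_PAGE) -
		    CHUNK_CEILING(nactive << LG_PAGE);
	}

	int
	main(void)
	{
		/* Growing from 500 to 700 active pages crosses one 2 MiB boundary. */
		printf("cactive grows by %zu bytes\n", cactive_add_bytes(500, 200));
		return 0;
	}

Here 500 pages round up to one 2 MiB chunk and 700 pages round up to two, so the accounted growth is exactly one chunk (2097152 bytes) even though only 200 pages were added.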