Lines Matching defs:arena

43 static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
46 static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
49 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
51 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
52 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
53 static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
55 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
59 static void arena_purge(arena_t *arena, bool all);
60 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
62 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
64 static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
70 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
71 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
74 static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
76 static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
78 static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
80 static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
248 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
260 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
270 arena->ndirty += npages;
274 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
276 arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
281 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
293 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
304 arena->ndirty -= npages;
308 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
310 arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
384 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
406 arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
412 size_t cactive_diff = CHUNK_CEILING((arena->nactive +
413 need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
418 arena->nactive += need_pages;
438 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
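
These matches appear to come from jemalloc 3.x's src/arena.c. On lines 412-413 above, arena_run_split() charges the cactive statistic by rounding the active page count up to a chunk boundary before and after the split and taking the difference, so cactive only ever moves in whole-chunk increments. A minimal standalone sketch of that computation; the 4 KiB page and 4 MiB chunk sizes are assumed defaults, not taken from the listing.

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE         12                      /* assumed: 4 KiB pages */
#define CHUNKSIZE       ((size_t)1 << 22)       /* assumed: 4 MiB chunks */
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

/* Growth of the chunk-rounded active byte count when need_pages pages
 * become active; mirrors the cactive_diff expression on lines 412-413. */
static size_t
cactive_diff(size_t nactive, size_t need_pages)
{
        return (CHUNK_CEILING((nactive + need_pages) << LG_PAGE) -
            CHUNK_CEILING(nactive << LG_PAGE));
}

int
main(void)
{
        /* 1023 active pages + 2 more crosses a 4 MiB chunk boundary. */
        printf("%zu\n", cactive_diff(1023, 2));         /* prints 4194304 */
        return (0);
}
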
517 arena_chunk_alloc(arena_t *arena)
522 if (arena->spare != NULL) {
523 chunk = arena->spare;
524 arena->spare = NULL;
539 malloc_mutex_unlock(&arena->lock);
541 false, &zero, arena->dss_prec);
542 malloc_mutex_lock(&arena->lock);
546 arena->stats.mapped += chunksize;
548 chunk->arena = arena;
590 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
597 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
609 * Remove run from the runs_avail tree, so that the arena does not use
612 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
615 if (arena->spare != NULL) {
616 arena_chunk_t *spare = arena->spare;
618 arena->spare = chunk;
619 malloc_mutex_unlock(&arena->lock);
621 malloc_mutex_lock(&arena->lock);
623 arena->stats.mapped -= chunksize;
625 arena->spare = chunk;
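
arena_chunk_alloc() (lines 522-524) and arena_chunk_dealloc() (lines 615-625) cooperate on a one-deep cache: each arena keeps at most one retired chunk in arena->spare, so a free-then-allocate cycle avoids a round trip through the chunk allocator. A hedged sketch of the pattern; chunk_t and the take/stash helpers are placeholders, not jemalloc API.

#include <stddef.h>

typedef struct chunk_s chunk_t;         /* placeholder type */

/* Take the cached chunk if there is one; NULL tells the caller to map a
 * fresh chunk instead (the lines 522-524 path vs. lines 539-542). */
static chunk_t *
spare_take(chunk_t **spare)
{
        chunk_t *chunk = *spare;

        *spare = NULL;
        return (chunk);
}

/* Cache the retired chunk and hand back the one it displaced; the caller
 * releases the displaced chunk, dropping the arena lock around the
 * release just as lines 615-623 do. */
static chunk_t *
spare_stash(chunk_t **spare, chunk_t *chunk)
{
        chunk_t *displaced = *spare;

        *spare = chunk;
        return (displaced);
}
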
629 arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
636 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
645 arena_run_split(arena, run, size, large, binind, zero);
653 arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
664 /* Search the arena's chunks for the lowest best fit. */
665 run = arena_run_alloc_helper(arena, size, large, binind, zero);
672 chunk = arena_chunk_alloc(arena);
675 arena_run_split(arena, run, size, large, binind, zero);
681 * sufficient memory available while this one dropped arena->lock in
684 return (arena_run_alloc_helper(arena, size, large, binind, zero));
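
arena_run_alloc() (lines 653-684) tries a best-fit search, grows the arena by one chunk on a miss, and on failure searches once more: arena_chunk_alloc() drops arena->lock while mapping memory (lines 539-542), so another thread may have freed a usable run in the meantime (the comment fragment at line 681). A simplified sketch of that control flow; try_best_fit() and grow_one_chunk() are hypothetical stand-ins for arena_run_alloc_helper() and arena_chunk_alloc().

#include <stdbool.h>
#include <stddef.h>

typedef struct run_s run_t;             /* placeholder type */

/* Hypothetical stand-ins, stubbed so the sketch compiles on its own. */
static run_t *try_best_fit(size_t size) { (void)size; return (NULL); }
static bool grow_one_chunk(void) { return (false); }

static run_t *
run_alloc(size_t size)
{
        run_t *run;

        /* Search existing chunks for the lowest best fit. */
        if ((run = try_best_fit(size)) != NULL)
                return (run);
        /* No fit: grow the arena by one chunk and carve the run from it. */
        if (grow_one_chunk())
                return (try_best_fit(size));
        /*
         * The chunk allocation failed, but it dropped the arena lock, so
         * another thread may have freed memory meanwhile: search again.
         */
        return (try_best_fit(size));
}
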
688 arena_maybe_purge(arena_t *arena)
696 if (arena->ndirty <= arena->npurgatory)
698 npurgeable = arena->ndirty - arena->npurgatory;
699 threshold = (arena->nactive >> opt_lg_dirty_mult);
707 arena_purge(arena, false);
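
arena_maybe_purge() (lines 688-707) triggers a purge only when the dirty pages not already claimed for purging exceed nactive >> opt_lg_dirty_mult, i.e. when the dirty:active ratio passes 1:2^opt_lg_dirty_mult. A standalone sketch of the trigger; opt_lg_dirty_mult = 3 (a 1:8 ratio) is assumed, matching jemalloc 3.x's documented default.

#include <stdbool.h>
#include <stddef.h>

static unsigned opt_lg_dirty_mult = 3;  /* assumed jemalloc 3.x default */

/* Mirrors the decision on lines 696-699: purge only when the unclaimed
 * dirty pages outnumber active pages >> opt_lg_dirty_mult. */
static bool
should_purge(size_t nactive, size_t ndirty, size_t npurgatory)
{
        size_t npurgeable, threshold;

        if (ndirty <= npurgatory)
                return (false);         /* everything dirty is claimed */
        npurgeable = ndirty - npurgatory;
        threshold = nactive >> opt_lg_dirty_mult;
        return (npurgeable > threshold);
}
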
711 arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
724 * completely discarded by another thread while arena->lock is dropped
732 * (chunk == arena->spare) is possible, but it is not possible for
736 if (chunk == arena->spare) {
740 arena_chunk_alloc(arena);
744 arena->stats.purged += chunk->ndirty;
775 arena_run_split(arena, run, run_size, true,
794 binind = arena_bin_index(arena, run->bin);
804 malloc_mutex_unlock(&arena->lock);
824 * the arena mutex isn't currently owned by this thread,
838 malloc_mutex_lock(&arena->lock);
840 arena->stats.nmadvise += nmadvise;
852 arena_run_dalloc(arena, run, false, true);
869 arena_purge(arena_t *arena, bool all)
876 arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
878 assert(ndirty == arena->ndirty);
880 assert(arena->ndirty > arena->npurgatory || all);
881 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
882 arena->npurgatory) || all);
885 arena->stats.npurge++;
889 * purge, and add the result to arena->npurgatory. This will keep
893 size_t npurgeable = arena->ndirty - arena->npurgatory;
896 size_t threshold = (arena->nactive >>
903 arena->npurgatory += npurgatory;
909 chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
917 arena->npurgatory -= npurgatory;
930 * 1) This thread sets arena->npurgatory such that
931 * (arena->ndirty - arena->npurgatory) is at the
933 * 2) This thread drops arena->lock.
942 arena->npurgatory += npurgeable - npurgatory;
950 arena->npurgatory -= npurgeable;
952 npurged = arena_chunk_purge(arena, chunk, all);
954 arena->npurgatory += nunpurged;
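
arena_purge() (lines 869-954) first decides how many pages to purge and reserves them in arena->npurgatory before dropping the lock, so concurrent threads treat those pages as already spoken for; the comments around lines 930-954 describe reconciling that reservation as chunks are actually purged. A sketch of the initial sizing, under the same assumptions as above; purge_target() is illustrative, not jemalloc's function.

#include <stdbool.h>
#include <stddef.h>

/*
 * How many dirty pages a purge pass should claim up front: all of them
 * for a full purge, otherwise just enough to bring the unclaimed dirty
 * count back down to the threshold. The caller guarantees npurgeable
 * exceeds the threshold (the asserts on lines 880-882).
 */
static size_t
purge_target(size_t nactive, size_t ndirty, size_t npurgatory, bool all,
    unsigned lg_dirty_mult)
{
        size_t npurgeable = ndirty - npurgatory;

        if (all)
                return (npurgeable);
        return (npurgeable - (nactive >> lg_dirty_mult));
}
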
960 arena_purge_all(arena_t *arena)
963 malloc_mutex_lock(&arena->lock);
964 arena_purge(arena, true);
965 malloc_mutex_unlock(&arena->lock);
969 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
984 size_t binind = arena_bin_index(arena, run->bin);
994 size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
995 CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
999 arena->nactive -= run_pages;
1041 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
1068 arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
1084 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
1090 arena_chunk_dealloc(arena, chunk);
1101 arena_maybe_purge(arena);
1105 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1133 arena_run_dalloc(arena, run, false, false);
1137 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1165 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
1227 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1239 binind = arena_bin_index(arena, bin);
1245 malloc_mutex_lock(&arena->lock);
1246 run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
1257 malloc_mutex_unlock(&arena->lock);
1282 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1289 binind = arena_bin_index(arena, bin);
1292 run = arena_bin_nonfull_run_get(arena, bin);
1313 arena_dalloc_bin_run(arena, chunk, run, bin);
1315 arena_bin_lower_run(arena, chunk, run, bin);
1331 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1341 if (config_prof && arena_prof_accum(arena, prof_accumbytes))
1343 bin = &arena->bins[binind];
1350 ptr = arena_bin_malloc_hard(arena, bin);
1421 arena_malloc_small(arena_t *arena, size_t size, bool zero)
1430 bin = &arena->bins[binind];
1437 ret = arena_bin_malloc_hard(arena, bin);
1450 if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1475 arena_malloc_large(arena_t *arena, size_t size, bool zero)
1482 malloc_mutex_lock(&arena->lock);
1483 ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
1485 malloc_mutex_unlock(&arena->lock);
1489 arena->stats.nmalloc_large++;
1490 arena->stats.nrequests_large++;
1491 arena->stats.allocated_large += size;
1492 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1493 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1494 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1497 idump = arena_prof_accum_locked(arena, size);
1498 malloc_mutex_unlock(&arena->lock);
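
The recurring subscript lstats[(size >> LG_PAGE) - 1] (lines 1492-1494, and again in the dalloc and ralloc paths below) maps a large size class to its per-class stats slot: large sizes are whole page multiples, so an n-page allocation lands in slot n - 1. A tiny illustration, with 4 KiB pages assumed.

#include <stddef.h>

#define LG_PAGE 12      /* assumed: 4 KiB pages */

/* n-page large size class -> lstats slot n - 1. */
static size_t
lstats_index(size_t size)
{
        return ((size >> LG_PAGE) - 1); /* 4096 -> 0, 8192 -> 1, ... */
}
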
1516 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1528 malloc_mutex_lock(&arena->lock);
1529 run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
1531 malloc_mutex_unlock(&arena->lock);
1542 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1546 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1551 arena->stats.nmalloc_large++;
1552 arena->stats.nrequests_large++;
1553 arena->stats.allocated_large += size;
1554 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1555 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1556 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1558 malloc_mutex_unlock(&arena->lock);
1601 size_t binind = arena_bin_index(chunk->arena, bin);
1616 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1628 binind = arena_bin_index(chunk->arena, run->bin);
1639 malloc_mutex_lock(&arena->lock);
1654 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1658 arena_run_dalloc(arena, run, true, false);
1659 malloc_mutex_unlock(&arena->lock);
1667 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1688 arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1712 arena_dalloc_bin_run(arena, chunk, run, bin);
1714 arena_bin_lower_run(arena, chunk, run, bin);
1723 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1733 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1738 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1749 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1753 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1763 arena->stats.ndalloc_large++;
1764 arena->stats.allocated_large -= size;
1765 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1766 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1770 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
1774 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1777 malloc_mutex_lock(&arena->lock);
1778 arena_dalloc_large_locked(arena, chunk, ptr);
1779 malloc_mutex_unlock(&arena->lock);
1783 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1793 malloc_mutex_lock(&arena->lock);
1794 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1797 arena->stats.ndalloc_large++;
1798 arena->stats.allocated_large -= oldsize;
1799 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1800 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1802 arena->stats.nmalloc_large++;
1803 arena->stats.nrequests_large++;
1804 arena->stats.allocated_large += size;
1805 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1806 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1807 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1809 malloc_mutex_unlock(&arena->lock);
1813 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1824 malloc_mutex_lock(&arena->lock);
1837 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1858 arena->stats.ndalloc_large++;
1859 arena->stats.allocated_large -= oldsize;
1860 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1861 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1863 arena->stats.nmalloc_large++;
1864 arena->stats.nrequests_large++;
1865 arena->stats.allocated_large += size;
1866 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1867 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1868 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1870 malloc_mutex_unlock(&arena->lock);
1873 malloc_mutex_unlock(&arena->lock);
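
arena_ralloc_large_shrink() and arena_ralloc_large_grow() (lines 1783-1873) resize the run in place (growing succeeds only when the run that follows is free) and re-account the result as a deallocation at the old size class plus an allocation at the new one, keeping allocated_large and lstats consistent with the ordinary malloc/dalloc paths. A sketch of that re-accounting; the struct layout is illustrative, not jemalloc's malloc_large_stats_t.

#include <stddef.h>

#define LG_PAGE 12      /* assumed: 4 KiB pages */

typedef struct {
        size_t nmalloc, ndalloc, nrequests, curruns;
} lstats_t;

static void
ralloc_restat(lstats_t *lstats, size_t *allocated_large, size_t oldsize,
    size_t size)
{
        /* Retire the old size class, as a free would... */
        lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
        lstats[(oldsize >> LG_PAGE) - 1].curruns--;
        *allocated_large -= oldsize;

        /* ...and enter the new one, as a fresh allocation would. */
        lstats[(size >> LG_PAGE) - 1].nmalloc++;
        lstats[(size >> LG_PAGE) - 1].nrequests++;
        lstats[(size >> LG_PAGE) - 1].curruns++;
        *allocated_large += size;
}
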
1898 arena_t *arena;
1901 arena = chunk->arena;
1909 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1913 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
1963 arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
1984 ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
1986 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
1997 arena);
1999 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
2019 arena_dss_prec_get(arena_t *arena)
2023 malloc_mutex_lock(&arena->lock);
2024 ret = arena->dss_prec;
2025 malloc_mutex_unlock(&arena->lock);
2030 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2033 malloc_mutex_lock(&arena->lock);
2034 arena->dss_prec = dss_prec;
2035 malloc_mutex_unlock(&arena->lock);
2039 arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2045 malloc_mutex_lock(&arena->lock);
2046 *dss = dss_prec_names[arena->dss_prec];
2047 *nactive += arena->nactive;
2048 *ndirty += arena->ndirty;
2050 astats->mapped += arena->stats.mapped;
2051 astats->npurge += arena->stats.npurge;
2052 astats->nmadvise += arena->stats.nmadvise;
2053 astats->purged += arena->stats.purged;
2054 astats->allocated_large += arena->stats.allocated_large;
2055 astats->nmalloc_large += arena->stats.nmalloc_large;
2056 astats->ndalloc_large += arena->stats.ndalloc_large;
2057 astats->nrequests_large += arena->stats.nrequests_large;
2060 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2061 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2062 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2063 lstats[i].curruns += arena->stats.lstats[i].curruns;
2065 malloc_mutex_unlock(&arena->lock);
2068 arena_bin_t *bin = &arena->bins[i];
2087 arena_new(arena_t *arena, unsigned ind)
2092 arena->ind = ind;
2093 arena->nthreads = 0;
2095 if (malloc_mutex_init(&arena->lock))
2099 memset(&arena->stats, 0, sizeof(arena_stats_t));
2100 arena->stats.lstats =
2103 if (arena->stats.lstats == NULL)
2105 memset(arena->stats.lstats, 0, nlclasses *
2108 ql_new(&arena->tcache_ql);
2112 arena->prof_accumbytes = 0;
2114 arena->dss_prec = chunk_dss_prec_get();
2117 arena_chunk_dirty_new(&arena->chunks_dirty);
2118 arena->spare = NULL;
2120 arena->nactive = 0;
2121 arena->ndirty = 0;
2122 arena->npurgatory = 0;
2124 arena_avail_tree_new(&arena->runs_avail);
2128 bin = &arena->bins[i];
2338 arena_prefork(arena_t *arena)
2342 malloc_mutex_prefork(&arena->lock);
2344 malloc_mutex_prefork(&arena->bins[i].lock);
2348 arena_postfork_parent(arena_t *arena)
2353 malloc_mutex_postfork_parent(&arena->bins[i].lock);
2354 malloc_mutex_postfork_parent(&arena->lock);
2358 arena_postfork_child(arena_t *arena)
2363 malloc_mutex_postfork_child(&arena->bins[i].lock);
2364 malloc_mutex_postfork_child(&arena->lock);
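
The prefork/postfork trio that closes the listing (lines 2338-2364) is the standard pthread_atfork() discipline: acquire the arena lock and every bin lock before fork() so the child never inherits a lock held mid-operation, then unlock in the parent and reinitialize in the child. A minimal version of the pattern, reduced to a single lock; the atfork wiring in the trailing comment shows how such hooks are typically registered, and is not a line from this file.

#include <pthread.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
        pthread_mutex_lock(&arena_lock);        /* quiesce before fork() */
}

static void
postfork_parent(void)
{
        pthread_mutex_unlock(&arena_lock);
}

static void
postfork_child(void)
{
        /* The child is single-threaded; reinitialize rather than unlock. */
        pthread_mutex_init(&arena_lock, NULL);
}

/* Typically registered once at startup:
 *      pthread_atfork(prefork, postfork_parent, postfork_child);
 */
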