Lines Matching defs:extent

35 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
44 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
51 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
57 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
102 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 extent_t *extent, bool *coalesced, bool growing_retained);
111 extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
142 * It's possible that the extent changed out from under us, and with it
143 * the leaf->extent mapping. We have to recheck while holding the lock.
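
The recheck comment above describes the optimistic lookup pattern used throughout this file: read the mapping without the lock, take the extent's lock, then re-read to confirm the mapping still points at the same extent. A minimal sketch, assuming a hypothetical leaf_extent_get() in place of the real rtree machinery:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct extent_s {
            pthread_mutex_t mtx;
    } extent_t;

    /* Hypothetical racy lookup; re-reads the current leaf->extent mapping. */
    extern extent_t *leaf_extent_get(void *addr);

    static extent_t *
    lock_extent_from_addr(void *addr) {
            for (;;) {
                    extent_t *extent = leaf_extent_get(addr); /* unlocked read */
                    if (extent == NULL) {
                            return NULL;
                    }
                    pthread_mutex_lock(&extent->mtx);
                    /* Recheck the mapping while holding the lock. */
                    if (leaf_extent_get(addr) == extent) {
                            return extent; /* still current; caller unlocks */
                    }
                    pthread_mutex_unlock(&extent->mtx);
                    /* The extent changed out from under us; retry. */
            }
    }
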
182 extent_t *extent = extent_avail_first(&arena->extent_avail);
183 if (extent == NULL) {
187 extent_avail_remove(&arena->extent_avail, extent);
190 return extent;
194 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
196 extent_avail_insert(&arena->extent_avail, extent);
269 * Skip a quantization that may have an adequately large extent,
324 extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
325 size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
326 atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
327 cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
328 atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
332 extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
333 size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
334 atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
335 cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
336 atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
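
In extents_stats_add()/extents_stats_sub() above, the parameter is the extents_t container even though it is named extent, and the counters use relaxed atomics with a separate load and store. That is safe only because writers are serialized by the container's mutex; the atomics exist so concurrent stats readers see no data race. A standalone C11 sketch of the same pattern (NPSIZES and the struct are illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    #define NPSIZES 64 /* assumed page-size-class count for this sketch */

    typedef struct {
            atomic_size_t nextents[NPSIZES];
            atomic_size_t nbytes[NPSIZES];
    } ext_stats_t;

    /* Caller holds the container's mutex, so a plain load+store (rather than
     * fetch_add) has no lost-update hazard: writers never race each other. */
    static void
    ext_stats_add(ext_stats_t *stats, size_t pind, size_t sz) {
            size_t cur = atomic_load_explicit(&stats->nextents[pind],
                memory_order_relaxed);
            atomic_store_explicit(&stats->nextents[pind], cur + 1,
                memory_order_relaxed);
            cur = atomic_load_explicit(&stats->nbytes[pind],
                memory_order_relaxed);
            atomic_store_explicit(&stats->nbytes[pind], cur + sz,
                memory_order_relaxed);
    }
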
340 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
342 assert(extent_state_get(extent) == extents->state);
344 size_t size = extent_size_get(extent);
351 extent_heap_insert(&extents->heaps[pind], extent);
357 extent_list_append(&extents->lru, extent);
371 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
373 assert(extent_state_get(extent) == extents->state);
375 size_t size = extent_size_get(extent);
378 extent_heap_remove(&extents->heaps[pind], extent);
388 extent_list_remove(&extents->lru, extent);
402 * Find an extent with size [min_size, max_size) to satisfy the alignment
403 * requirement. For each size, try only the first extent in the heap.
417 extent_t *extent = extent_heap_first(&extents->heaps[i]);
418 uintptr_t base = (uintptr_t)extent_base_get(extent);
419 size_t candidate_size = extent_size_get(extent);
431 return extent;
439 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
464 extent_t *extent = extent_heap_first(&extents->heaps[i]);
465 assert(extent_size_get(extent) >= size);
476 if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
477 ret = extent;
489 * Do first-fit extent selection, where the selection policy choice is
503 extent_t *extent =
506 if (alignment > PAGE && extent == NULL) {
512 extent = extents_fit_alignment(extents, esize, max_size,
516 return extent;
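
The two policy comments above (sources 402-403 and 439 onward) describe variants of one idea: scan the per-size-class heaps starting from the smallest class that can hold the request, look only at each heap's best element, and keep the candidate with the lowest (serial number, address) pair. A condensed sketch, with all types and the heap_first array illustrative:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
            size_t sn;      /* serial number; lower means older */
            uintptr_t addr;
            size_t size;
    } ext_t;

    /* Oldest first, then lowest address: the "snad" ordering. */
    static int
    ext_snad_comp(const ext_t *a, const ext_t *b) {
            if (a->sn != b->sn) {
                    return a->sn < b->sn ? -1 : 1;
            }
            return (a->addr > b->addr) - (a->addr < b->addr);
    }

    /* heap_first[i] is the best element of size class i, or NULL if empty. */
    static ext_t *
    first_fit(ext_t *heap_first[], size_t nclasses, size_t min_class,
        size_t size) {
            ext_t *ret = NULL;
            for (size_t i = min_class; i < nclasses; i++) {
                    ext_t *e = heap_first[i];
                    if (e == NULL || e->size < size) {
                            continue;
                    }
                    if (ret == NULL || ext_snad_comp(e, ret) < 0) {
                            ret = e;
                    }
            }
            return ret;
    }
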
522 extent_t *extent) {
523 extent_state_set(extent, extent_state_active);
525 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
526 extents, extent, &coalesced, false);
527 extent_state_set(extent, extents_state_get(extents));
532 extents_insert_locked(tsdn, extents, extent);
545 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
547 assert(extent == NULL || extent_dumpable_get(extent));
548 return extent;
553 extents_t *extents, extent_t *extent) {
554 assert(extent_base_get(extent) != NULL);
555 assert(extent_size_get(extent) != 0);
556 assert(extent_dumpable_get(extent));
560 extent_addr_set(extent, extent_base_get(extent));
561 extent_zeroed_set(extent, false);
563 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
575 * Get the LRU coalesced extent, if any. If coalescing was delayed,
576 * the loop will iterate until the LRU extent is fully coalesced.
578 extent_t *extent;
580 /* Get the LRU extent, if any. */
581 extent = extent_list_first(&extents->lru);
582 if (extent == NULL) {
589 extent = NULL;
592 extents_remove_locked(tsdn, extents, extent);
598 rtree_ctx, extents, extent)) {
602 * The LRU extent was just coalesced and the result placed in
608 * Either mark the extent active or deregister it to protect against
616 extent_state_set(extent, extent_state_active);
619 extent_deregister(tsdn, extent);
627 return extent;
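
The eviction comment above (sources 575-576) boils down to a retry loop: pop the LRU extent, attempt to coalesce it, and if coalescing merged it into a neighbor (which puts the result back into the container), start over with the new LRU head. A sketch with hypothetical list and coalesce helpers:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ext ext_t;

    /* Illustrative stand-ins for the LRU list and extent_try_coalesce(). */
    extern ext_t *lru_first(void);
    extern void lru_remove(ext_t *e);
    extern ext_t *coalesce(ext_t *e, bool *coalesced);

    static ext_t *
    evict_lru(void) {
            ext_t *e;
            while ((e = lru_first()) != NULL) {
                    lru_remove(e);
                    bool coalesced;
                    e = coalesce(e, &coalesced);
                    if (!coalesced) {
                            break; /* fully coalesced; safe to hand out */
                    }
                    /* Merged result was re-inserted; retry from the new LRU. */
            }
            return e;
    }
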
631 * This can only happen when we fail to allocate a new extent struct (which
632 * indicates OOM), e.g. when trying to split an existing extent.
636 extents_t *extents, extent_t *extent, bool growing_retained) {
637 size_t sz = extent_size_get(extent);
642 * Leak extent after making sure its pages have already been purged, so
647 extent, 0, sz, growing_retained)) {
649 extent, 0, extent_size_get(extent),
653 extent_dalloc(tsdn, arena, extent);
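
extents_abandon_vm() above (sources 631-653) covers the OOM corner case: no extent struct could be allocated, so the address range is deliberately leaked, but only after its pages are purged so the leak costs address space rather than resident memory. A minimal Linux-flavored sketch, with madvise(MADV_DONTNEED) standing in for the configured purge hook:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Purge, then intentionally do not unmap: without an extent struct there
     * is no way to track the range, so it stays reserved but non-resident. */
    static void
    abandon_range(void *addr, size_t size) {
            (void)madvise(addr, size, MADV_DONTNEED);
    }
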
673 extent_t *extent) {
674 assert(extent_arena_get(extent) == arena);
675 assert(extent_state_get(extent) == extent_state_active);
677 extent_state_set(extent, extents_state_get(extents));
678 extents_insert_locked(tsdn, extents, extent);
683 extent_t *extent) {
685 extent_deactivate_locked(tsdn, arena, extents, extent);
691 extent_t *extent) {
692 assert(extent_arena_get(extent) == arena);
693 assert(extent_state_get(extent) == extents_state_get(extents));
695 extents_remove_locked(tsdn, extents, extent);
696 extent_state_set(extent, extent_state_active);
701 const extent_t *extent, bool dependent, bool init_missing,
704 (uintptr_t)extent_base_get(extent), dependent, init_missing);
711 (uintptr_t)extent_last_get(extent), dependent, init_missing);
722 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
723 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
725 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
731 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
733 assert(extent_slab_get(extent));
736 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
738 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
739 LG_PAGE), extent, szind, true);
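
extent_interior_register() above writes each interior page of a slab into the global rtree, so a pointer anywhere inside the slab resolves to the owning extent; the loop runs from 1 to npages - 2 because the first and last pages are covered by the boundary elements. A sketch with a hypothetical page-indexed map_set():

    #include <stddef.h>
    #include <stdint.h>

    #define LG_PAGE_SKETCH 12 /* assumed 4 KiB pages */

    /* Stand-in for the rtree write of one page -> extent entry. */
    extern void map_set(uintptr_t key, void *extent);

    static void
    interior_register(void *base, size_t size, void *extent) {
            size_t npages = size >> LG_PAGE_SKETCH;
            if (npages < 2) {
                    return; /* no interior pages */
            }
            /* Pages 0 and npages-1 are covered by boundary registrations. */
            for (size_t i = 1; i < npages - 1; i++) {
                    map_set((uintptr_t)base + (i << LG_PAGE_SKETCH), extent);
            }
    }
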
744 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
750 if (opt_prof && extent_state_get(extent) == extent_state_active) {
751 size_t nadd = extent_size_get(extent) >> LG_PAGE;
770 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
773 if (opt_prof && extent_state_get(extent) == extent_state_active) {
774 size_t nsub = extent_size_get(extent) >> LG_PAGE;
781 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
790 extent_lock(tsdn, extent);
792 if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
794 extent_unlock(tsdn, extent);
798 szind_t szind = extent_szind_get_maybe_invalid(extent);
799 bool slab = extent_slab_get(extent);
800 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
802 extent_interior_register(tsdn, rtree_ctx, extent, szind);
805 extent_unlock(tsdn, extent);
808 extent_gdump_add(tsdn, extent);
815 extent_register(tsdn_t *tsdn, extent_t *extent) {
816 return extent_register_impl(tsdn, extent, true);
820 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
821 return extent_register_impl(tsdn, extent, false);
825 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
826 bool err = extent_register(tsdn, extent);
831 * Removes all pointers to the given extent from the global rtree indices for
833 * metadata lookups at places other than the head of the extent. We deregister
834 * on the interior, then, when an extent moves from being an active slab to an
839 extent_t *extent) {
842 assert(extent_slab_get(extent));
844 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
846 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
852 * Removes all pointers to the given extent from the global rtree.
855 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
859 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
862 extent_lock(tsdn, extent);
865 if (extent_slab_get(extent)) {
866 extent_interior_deregister(tsdn, rtree_ctx, extent);
867 extent_slab_set(extent, false);
870 extent_unlock(tsdn, extent);
873 extent_gdump_sub(tsdn, extent);
878 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
879 extent_deregister_impl(tsdn, extent, true);
883 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
884 extent_deregister_impl(tsdn, extent, false);
888 * Tries to find and remove an extent from extents that can be used for the
903 * 1) Recycle a known-extant extent, e.g. during purging.
907 * non-existing extent, or to the base of an extant extent,
919 extent_t *extent;
921 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
923 if (extent != NULL) {
925 * We might null-out extent to report an error, but we
928 extent_t *unlock_extent = extent;
929 assert(extent_base_get(extent) == new_addr);
930 if (extent_arena_get(extent) != arena ||
931 extent_size_get(extent) < esize ||
932 extent_state_get(extent) !=
934 extent = NULL;
939 extent = extents_fit_locked(tsdn, arena, extents, esize,
942 if (extent == NULL) {
947 extent_activate_locked(tsdn, arena, extents, extent);
950 return extent;
954 * Given an allocation request and an extent guaranteed to be able to satisfy
955 * it, this splits off lead and trail extents, leaving extent pointing to an
956 * extent satisfying the allocation.
962 * Split successfully. lead, extent, and trail are modified to extents
967 * The extent can't satisfy the given allocation request. None of the
974 * None of lead, extent, or trail are valid.
983 extent_t **extent, extent_t **lead, extent_t **trail,
989 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
990 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
992 if (extent_size_get(*extent) < leadsize + esize) {
995 size_t trailsize = extent_size_get(*extent) - leadsize - esize;
1004 *lead = *extent;
1005 *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
1008 if (*extent == NULL) {
1017 *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
1021 *to_leak = *extent;
1024 *extent = NULL;
1034 extent_szind_set(*extent, szind);
1037 (uintptr_t)extent_addr_get(*extent), szind, slab);
1038 if (slab && extent_size_get(*extent) > PAGE) {
1041 (uintptr_t)extent_past_get(*extent) -
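
The contract comment above (sources 954-974) reduces to simple arithmetic: the lead is the padding needed to reach the next alignment boundary, and the trail is whatever remains past the esize-byte allocation. A worked sketch, assuming alignment is a power of two:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool
    split_sizes(uintptr_t base, size_t size, size_t esize, size_t alignment,
        size_t *leadsize, size_t *trailsize) {
            /* Round base up to the next multiple of alignment. */
            uintptr_t aligned = (base + (alignment - 1)) &
                ~((uintptr_t)alignment - 1);
            *leadsize = (size_t)(aligned - base);
            if (size < *leadsize + esize) {
                    return false; /* the extent can't satisfy the request */
            }
            *trailsize = size - *leadsize - esize;
            return true;
    }

For instance, base 0x13000 with alignment 0x4000, esize 0x2000, and size 0x8000 yields leadsize 0x1000 and trailsize 0x5000.
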
1051 * This fulfills the indicated allocation request out of the given extent (which
1053 * before or after the resulting allocation, that space is given its own extent
1060 szind_t szind, extent_t *extent, bool growing_retained) {
1067 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1089 return extent;
1092 * We should have picked an extent that was large enough to
1115 * Need to manually zero the extent on repopulating if either: 1) non
1116 * default extent hooks installed (in which case the purge semantics may
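
The truncated comment above (sources 1115-1116) lists when the cached zeroed flag cannot be trusted on reuse; in jemalloc the second condition is transparent huge pages being enabled, since purge semantics there may not guarantee zeroed pages. A hedged sketch of the resulting decision:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Illustrative: zero on reuse unless the zeroed flag can be trusted. */
    static void
    repopulate_zero(void *addr, size_t size, bool zeroed_flag,
        bool default_hooks, bool thp_enabled) {
            bool need_manual_zero = !default_hooks || thp_enabled;
            if (!zeroed_flag || need_manual_zero) {
                    memset(addr, 0, size);
            }
    }
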
1141 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1144 if (extent == NULL) {
1148 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1149 extents, new_addr, size, pad, alignment, slab, szind, extent,
1151 if (extent == NULL) {
1155 if (*commit && !extent_committed_get(extent)) {
1156 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1157 0, extent_size_get(extent), growing_retained)) {
1159 extent, growing_retained);
1163 extent_zeroed_set(extent, true);
1167 if (extent_committed_get(extent)) {
1170 if (extent_zeroed_get(extent)) {
1175 extent_addr_randomize(tsdn, extent, alignment);
1177 assert(extent_state_get(extent) == extent_state_active);
1179 extent_slab_set(extent, slab);
1180 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1184 void *addr = extent_base_get(extent);
1185 if (!extent_zeroed_get(extent)) {
1186 size_t size = extent_size_get(extent);
1199 return extent;
1273 * The only legitimate case of customized extent hooks for a0 is
1311 * Find the next extent size in the series that would be large enough to
1326 extent_t *extent = extent_alloc(tsdn, arena);
1327 if (extent == NULL) {
1345 extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
1349 extent_dalloc(tsdn, arena, extent);
1353 if (extent_register_no_gdump_add(tsdn, extent)) {
1354 extent_dalloc(tsdn, arena, extent);
1358 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1361 if (extent_committed_get(extent)) {
1373 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1388 * We should have allocated a sufficiently large extent; the
1407 if (*commit && !extent_committed_get(extent)) {
1408 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1409 extent_size_get(extent), true)) {
1411 &arena->extents_retained, extent, true);
1415 extent_zeroed_set(extent, true);
1433 /* Adjust gdump stats now that extent is final size. */
1434 extent_gdump_add(tsdn, extent);
1437 extent_addr_randomize(tsdn, extent, alignment);
1444 extent_slab_set(extent, true);
1445 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1447 if (*zero && !extent_zeroed_get(extent)) {
1448 void *addr = extent_base_get(extent);
1449 size_t size = extent_size_get(extent);
1456 return extent;
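
The "next extent size in the series" comment above (source 1311) is the heart of extent_grow_retained(): instead of mapping exactly what a request needs, the allocator jumps to the next step of a geometric series that is large enough, amortizing expensive system allocations and keeping the remainder retained. A simplified doubling-series sketch (jemalloc actually steps through page size classes):

    #include <stddef.h>

    /* Return the first doubling step at or above `request`, starting from
     * the cursor *lg_next, and advance the cursor so the next growth is
     * larger. Assumes request <= SIZE_MAX / 2. */
    static size_t
    next_grow_size(size_t request, unsigned *lg_next) {
            unsigned lg = *lg_next;
            while (((size_t)1 << lg) < request) {
                    lg++;
            }
            *lg_next = lg + 1;
            return (size_t)1 << lg;
    }
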
1471 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1474 if (extent != NULL) {
1477 extent_gdump_add(tsdn, extent);
1480 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1488 return extent;
1496 extent_t *extent = extent_alloc(tsdn, arena);
1497 if (extent == NULL) {
1513 extent_dalloc(tsdn, arena, extent);
1516 extent_init(extent, arena, addr, esize, slab, szind,
1520 extent_addr_randomize(tsdn, extent, alignment);
1522 if (extent_register(tsdn, extent)) {
1523 extent_dalloc(tsdn, arena, extent);
1527 return extent;
1539 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1541 if (extent == NULL) {
1551 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1555 assert(extent == NULL || extent_dumpable_get(extent));
1556 return extent;
1602 extent_t *extent, bool *coalesced, bool growing_retained,
1619 extent_past_get(extent), inactive_only);
1627 extent, next);
1632 r_extent_hooks, extents, extent, next, true,
1637 return extent;
1645 if (extent_before_get(extent) != NULL) {
1647 extent_before_get(extent), inactive_only);
1651 extent, prev);
1655 r_extent_hooks, extents, extent, prev, false,
1657 extent = prev;
1661 return extent;
1671 return extent;
1677 extent_t *extent, bool *coalesced, bool growing_retained) {
1679 extents, extent, coalesced, growing_retained, false);
1685 extent_t *extent, bool *coalesced, bool growing_retained) {
1687 extents, extent, coalesced, growing_retained, true);
1691 * Handles the metadata management portions of putting an unused extent into the
1696 extents_t *extents, extent_t *extent, bool growing_retained) {
1702 !extent_zeroed_get(extent));
1707 extent_szind_set(extent, SC_NSIZES);
1708 if (extent_slab_get(extent)) {
1709 extent_interior_deregister(tsdn, rtree_ctx, extent);
1710 extent_slab_set(extent, false);
1714 (uintptr_t)extent_base_get(extent), true) == extent);
1717 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1718 rtree_ctx, extents, extent, NULL, growing_retained);
1719 } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
1724 assert(extent_state_get(extent) == extent_state_active);
1725 extent = extent_try_coalesce_large(tsdn, arena,
1726 r_extent_hooks, rtree_ctx, extents, extent,
1729 if (extent_size_get(extent) >= oversize_threshold) {
1730 /* Shortcut to purge the oversize extent eagerly. */
1732 arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
1736 extent_deactivate_locked(tsdn, arena, extents, extent);
1742 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1748 if (extent_register(tsdn, extent)) {
1749 extent_dalloc(tsdn, arena, extent);
1752 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1777 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1780 assert(extent_base_get(extent) != NULL);
1781 assert(extent_size_get(extent) != 0);
1785 extent_addr_set(extent, extent_base_get(extent));
1791 err = extent_dalloc_default_impl(extent_base_get(extent),
1792 extent_size_get(extent));
1797 extent_base_get(extent), extent_size_get(extent),
1798 extent_committed_get(extent), arena_ind_get(arena)));
1803 extent_dalloc(tsdn, arena, extent);
1811 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1812 assert(extent_dumpable_get(extent));
1822 extent_deregister(tsdn, extent);
1824 extent)) {
1827 extent_reregister(tsdn, extent);
1835 if (!extent_committed_get(extent)) {
1837 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1838 0, extent_size_get(extent))) {
1842 extent_base_get(extent), extent_size_get(extent), 0,
1843 extent_size_get(extent), arena_ind_get(arena))) {
1845 } else if (extent_state_get(extent) == extent_state_muzzy ||
1848 extent_base_get(extent), extent_size_get(extent), 0,
1849 extent_size_get(extent), arena_ind_get(arena)))) {
1857 extent_zeroed_set(extent, zeroed);
1860 extent_gdump_sub(tsdn, extent);
1864 extent, false);
1882 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1883 assert(extent_base_get(extent) != NULL);
1884 assert(extent_size_get(extent) != 0);
1889 extent_deregister(tsdn, extent);
1891 extent_addr_set(extent, extent_base_get(extent));
1897 extent_destroy_default_impl(extent_base_get(extent),
1898 extent_size_get(extent));
1902 extent_base_get(extent), extent_size_get(extent),
1903 extent_committed_get(extent), arena_ind_get(arena));
1907 extent_dalloc(tsdn, arena, extent);
1919 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1929 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1930 extent_size_get(extent), offset, length, arena_ind_get(arena)));
1934 extent_committed_set(extent, extent_committed_get(extent) || !err);
1940 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1942 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1955 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1967 extent_base_get(extent), extent_size_get(extent), offset, length,
1972 extent_committed_set(extent, extent_committed_get(extent) && err);
1992 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2006 extent_base_get(extent), extent_size_get(extent), offset, length,
2017 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2019 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
2039 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2053 extent_base_get(extent), extent_size_get(extent), offset, length,
2063 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2065 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
2085 * Accepts the extent to split, and the characteristics of each side of the
2088 * with the trail (the higher addressed portion). This makes 'extent' the lead,
2093 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2096 assert(extent_size_get(extent) == size_a + size_b);
2111 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
2112 size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
2113 extent_state_get(extent), extent_zeroed_get(extent),
2114 extent_committed_get(extent), extent_dumpable_get(extent),
2123 extent_init(&lead, arena, extent_addr_get(extent), size_a,
2124 slab_a, szind_a, extent_sn_get(extent),
2125 extent_state_get(extent), extent_zeroed_get(extent),
2126 extent_committed_get(extent), extent_dumpable_get(extent),
2141 extent_lock2(tsdn, extent, trail);
2146 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2147 size_a + size_b, size_a, size_b, extent_committed_get(extent),
2156 extent_size_set(extent, size_a);
2157 extent_szind_set(extent, szind_a);
2159 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2164 extent_unlock2(tsdn, extent, trail);
2168 extent_unlock2(tsdn, extent, trail);
2177 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2179 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2197 * settings. Assumes the second extent has the higher address.
2213 /* If b is a head extent, disallow the cross-region merge. */
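
The head-extent rule above guards merges: under settings where distinct mapped regions must not be fused (sources 2197 and 2213), extent b may merge into a only when b directly follows a and b is not the head of its own mapping. A sketch of such a predicate, with illustrative types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
            uintptr_t base;
            size_t size;
            bool is_head; /* starts an independently mapped region */
    } ext_t;

    /* a must be the lower-addressed extent, per the comment above. */
    static bool
    can_merge(const ext_t *a, const ext_t *b) {
            return a->base + a->size == b->base && !b->is_head;
    }
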
2344 const extent_t *extent = iealloc(tsdn, ptr);
2345 if (unlikely(extent == NULL)) {
2350 *size = extent_size_get(extent);
2351 if (!extent_slab_get(extent)) {
2355 *nfree = extent_nfree_get(extent);
2356 *nregs = bin_infos[extent_szind_get(extent)].nregs;
2358 assert(*nfree * extent_usize_get(extent) <= *size);
2369 const extent_t *extent = iealloc(tsdn, ptr);
2370 if (unlikely(extent == NULL)) {
2376 *size = extent_size_get(extent);
2377 if (!extent_slab_get(extent)) {
2384 *nfree = extent_nfree_get(extent);
2385 const szind_t szind = extent_szind_get(extent);
2388 assert(*nfree * extent_usize_get(extent) <= *size);
2390 const arena_t *arena = extent_arena_get(extent);
2392 const unsigned binshard = extent_binshard_get(extent);