Lines Matching refs:pmap

52  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
87 __FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/pmap.c 338484 2018-09-05 21:28:33Z kib $");
218 struct pmap kernel_pmap_store;
245 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
260 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
261 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
263 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
264 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
269 static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
270 static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
272 static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
273 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
275 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
277 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
280 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
283 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
314 pmap_l0(pmap_t pmap, vm_offset_t va)
317 return (&pmap->pm_l0[pmap_l0_index(va)]);
330 pmap_l1(pmap_t pmap, vm_offset_t va)
334 l0 = pmap_l0(pmap, va);
351 pmap_l2(pmap_t pmap, vm_offset_t va)
355 l1 = pmap_l1(pmap, va);
376 pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
380 l0 = pmap_l0(pmap, va);
411 pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
416 l1 = pmap_l1(pmap, va);
460 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
465 if (pmap->pm_l0 == NULL)
468 l0p = pmap_l0(pmap, va);
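
The walkers above (pmap_l0() through pmap_pte() and pmap_get_tables()) descend the translation table by pulling one 9-bit index per level out of the virtual address. A minimal user-space sketch of that index math, assuming the usual 48-bit VA, 4KB-granule layout; the names below are illustrative stand-ins, not the pmap_l0_index()-style macros from pmap.c:

#include <stdint.h>
#include <stdio.h>

#define	LN_ENTRIES	512			/* 9 bits of index per level */
#define	L0_SHIFT	39
#define	L1_SHIFT	30
#define	L2_SHIFT	21
#define	L3_SHIFT	12

static uint64_t
level_index(uint64_t va, unsigned shift)
{
	/* Select the 9 VA bits that index this level's table. */
	return ((va >> shift) & (LN_ENTRIES - 1));
}

int
main(void)
{
	uint64_t va = 0x0000000812345678ULL;

	printf("l0=%llu l1=%llu l2=%llu l3=%llu\n",
	    (unsigned long long)level_index(va, L0_SHIFT),
	    (unsigned long long)level_index(va, L1_SHIFT),
	    (unsigned long long)level_index(va, L2_SHIFT),
	    (unsigned long long)level_index(va, L3_SHIFT));
	return (0);
}
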
500 pmap_is_current(pmap_t pmap)
503 return ((pmap == pmap_kernel()) ||
504 (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
548 pmap_resident_count_inc(pmap_t pmap, int count)
551 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
552 pmap->pm_stats.resident_count += count;
556 pmap_resident_count_dec(pmap_t pmap, int count)
559 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
560 KASSERT(pmap->pm_stats.resident_count >= count,
561 ("pmap %p resident count underflow %ld %d", pmap,
562 pmap->pm_stats.resident_count, count));
563 pmap->pm_stats.resident_count -= count;
875 * Initialize the pmap module.
876 * Called by vm_init, to initialize any structures that the pmap
888 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
893 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
899 rw_init(&pv_list_locks[i], "pmap pv list");
937 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
951 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
968 pmap_invalidate_all(pmap_t pmap)
987 pmap_extract(pmap_t pmap, vm_offset_t va)
994 PMAP_LOCK(pmap);
999 pte = pmap_pte(pmap, va, &lvl);
1024 PMAP_UNLOCK(pmap);
1032 * with the given pmap and virtual address pair
1036 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1046 PMAP_LOCK(pmap);
1048 pte = pmap_pte(pmap, va, &lvl);
1072 if (vm_page_pa_tryrelock(pmap,
1080 PMAP_UNLOCK(pmap);
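
pmap_extract() and pmap_extract_and_hold() turn the entry found by pmap_pte() back into a physical address by combining the entry's output address with the VA bits below the block or page size for the level at which the walk stopped. A sketch of that math, assuming the usual 1GB/2MB/4KB sizes and the conventional ARMv8 split of attribute bits (63:52 and 11:0) from the output address:

#include <stdint.h>

#define	ATTR_BITS	0xfff0000000000fffULL	/* attributes, not the OA */
#define	L1_OFFSET	((1ULL << 30) - 1)	/* 1GB block */
#define	L2_OFFSET	((1ULL << 21) - 1)	/* 2MB block */
#define	L3_OFFSET	((1ULL << 12) - 1)	/* 4KB page */

static uint64_t
pa_from_pte(uint64_t pte, uint64_t va, int lvl)
{
	uint64_t off;

	/* Keep the VA bits that fall inside the mapped block/page. */
	off = (lvl == 1) ? L1_OFFSET : (lvl == 2) ? L2_OFFSET : L3_OFFSET;
	return ((pte & ~ATTR_BITS) | (va & off));
}
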
1347 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1352 _pmap_unwire_l3(pmap, va, m, free);
1359 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1362 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1370 l0 = pmap_l0(pmap, va);
1377 l1 = pmap_l1(pmap, va);
1384 l2 = pmap_l2(pmap, va);
1388 pmap_resident_count_dec(pmap, 1);
1394 l1 = pmap_l1(pmap, va);
1397 pmap_unwire_l3(pmap, va, l2pg, free);
1403 l0 = pmap_l0(pmap, va);
1406 pmap_unwire_l3(pmap, va, l1pg, free);
1408 pmap_invalidate_page(pmap, va);
1429 pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1438 return (pmap_unwire_l3(pmap, va, mpte, free));
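
pmap_unwire_l3() and _pmap_unwire_l3() drop a page table page's reference count and, when it reaches zero, free the page and recurse into the parent level, which is why the L2, L1 and L0 entries are all looked up above. A simplified, hypothetical sketch of that cascade; the real code also clears the parent entry, queues the page on the supplied free list, and invalidates the TLB:

#include <stddef.h>

struct ptpage {
	struct ptpage	*parent;	/* page holding the entry mapping us */
	int		 refcount;	/* valid child entries (wire_count) */
};

/*
 * Returns how many table pages became free.  Each freed page removes one
 * valid entry from its parent, which may in turn become free.
 */
static int
unwire_cascade(struct ptpage *pg)
{
	int freed = 0;

	while (pg != NULL && --pg->refcount == 0) {
		freed++;
		pg = pg->parent;
	}
	return (freed);
}
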
1442 pmap_pinit0(pmap_t pmap)
1445 PMAP_LOCK_INIT(pmap);
1446 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1447 pmap->pm_l0 = kernel_pmap->pm_l0;
1448 pmap->pm_root.rt_root = 0;
1452 pmap_pinit(pmap_t pmap)
1465 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1468 pagezero(pmap->pm_l0);
1470 pmap->pm_root.rt_root = 0;
1471 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1488 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1492 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1501 PMAP_UNLOCK(pmap);
1503 PMAP_LOCK(pmap);
1525 l0 = &pmap->pm_l0[l0index];
1536 l0 = &pmap->pm_l0[l0index];
1540 if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1565 l0 = &pmap->pm_l0[l0index];
1569 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1585 if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1606 pmap_resident_count_inc(pmap, 1);
1612 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1630 pde = pmap_pde(pmap, va, &lvl);
1669 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
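
The ptepindex values passed to _pmap_alloc_l3() place page table pages of every level in a single index space: pages holding L3 entries come first, then pages holding L2 entries at offset NUL2E, then pages holding L1 entries at offset NUL2E + NUL1E, as the calls above suggest. A sketch of that layout, where NUL1E/NUL2E are assumed to be the total number of L1 and L2 entries for a 48-bit, 4KB-granule address space:

#include <stdint.h>

#define	LN_ENTRIES	512ULL
#define	L0_SHIFT	39
#define	L1_SHIFT	30
#define	L2_SHIFT	21

/* Assumed to mirror the source's NUL1E/NUL2E: total L1 and L2 entries. */
#define	NUL1E		(LN_ENTRIES * LN_ENTRIES)
#define	NUL2E		(LN_ENTRIES * LN_ENTRIES * LN_ENTRIES)

/*
 * Index of the page table page that holds the level-"level" entries
 * covering va: L3-holding pages first, then L2-holding, then L1-holding.
 */
static uint64_t
ptepindex_for(uint64_t va, int level)
{
	switch (level) {
	case 3:
		return (va >> L2_SHIFT);		/* 0 .. NUL2E-1 */
	case 2:
		return (NUL2E + (va >> L1_SHIFT));	/* L2-holding pages */
	default:
		return (NUL2E + NUL1E + (va >> L0_SHIFT)); /* L1-holding */
	}
}
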
1683 * Called when a pmap initialized by pmap_pinit is being released.
1687 pmap_release(pmap_t pmap)
1691 KASSERT(pmap->pm_stats.resident_count == 0,
1692 ("pmap_release: pmap resident count %ld != 0",
1693 pmap->pm_stats.resident_count));
1694 KASSERT(vm_radix_is_empty(&pmap->pm_root),
1695 ("pmap_release: pmap has reserved page table page(s)"));
1697 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1845 * Returns NULL if PV entries were reclaimed from the specified pmap.
1858 pmap_t next_pmap, pmap;
1871 pmap = NULL;
1900 * corresponding pmap is locked.
1902 if (pmap != next_pmap) {
1903 if (pmap != NULL && pmap != locked_pmap)
1904 PMAP_UNLOCK(pmap);
1905 pmap = next_pmap;
1907 if (pmap > locked_pmap) {
1909 PMAP_LOCK(pmap);
1912 } else if (pmap != locked_pmap) {
1913 if (PMAP_TRYLOCK(pmap)) {
1917 pmap = NULL; /* pmap is not locked */
1938 pde = pmap_pde(pmap, va, &lvl);
1947 pmap_invalidate_page(pmap, va);
1965 pmap_unuse_l3(pmap, va, pmap_load(pde), &free);
1974 pmap_resident_count_dec(pmap, freed);
1978 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1991 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1994 if (pmap == locked_pmap)
2000 if (active_reclaims == 1 && pmap != NULL) {
2019 if (pmap != NULL && pmap != locked_pmap)
2020 PMAP_UNLOCK(pmap);
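
The pointer comparisons above implement reclaim_pv_chunk()'s deadlock avoidance: pmap locks are only ever taken in ascending address order, so while locked_pmap is held a higher-addressed pmap may be acquired with a blocking lock, but a lower-addressed one may only be trylocked. A user-space sketch of the same rule, with a pthread mutex standing in for PMAP_LOCK()/PMAP_TRYLOCK():

#include <pthread.h>
#include <stdbool.h>

struct fake_pmap {
	pthread_mutex_t	lock;
};

static bool
lock_second_pmap(struct fake_pmap *locked, struct fake_pmap *next)
{
	if (next == locked)
		return (true);			/* already held */
	if (next > locked) {			/* plain address ordering */
		pthread_mutex_lock(&next->lock);
		return (true);
	}
	/* Out of order: only try, never block, or two threads could deadlock. */
	return (pthread_mutex_trylock(&next->lock) == 0);
}
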
2036 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2041 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2053 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
2054 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2055 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2059 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2084 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
2090 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
2097 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2100 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2114 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2115 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
2131 m = reclaim_pv_chunk(pmap, lockp);
2139 pc->pc_pmap = pmap;
2147 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
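
free_pv_entry() and get_pv_entry() manage PV entries in chunks, each carrying a small free-slot bitmap, which is why a chunk is shuffled between the head and tail of pm_pvchunk as it fills and empties. A sketch of that bitmap allocation; the field names are illustrative rather than the real struct pv_chunk layout, which also links into the per-pmap and global chunk lists:

#include <stdint.h>

#define	PC_MAPS		3
#define	PC_SLOTS	(PC_MAPS * 64)

struct chunk {
	uint64_t	map[PC_MAPS];	/* 1 bit per free pv entry */
};

static int
chunk_alloc_slot(struct chunk *pc)
{
	int field, bit;

	for (field = 0; field < PC_MAPS; field++) {
		if (pc->map[field] == 0)
			continue;
		bit = __builtin_ctzll(pc->map[field]);
		pc->map[field] &= ~(1ULL << bit);	/* mark allocated */
		return (field * 64 + bit);
	}
	return (-1);				/* chunk is full */
}

static void
chunk_free_slot(struct chunk *pc, int slot)
{
	pc->map[slot / 64] |= 1ULL << (slot % 64);
}
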
2154 * Ensure that the number of spare PV entries in the specified pmap meets or
2160 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
2168 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2175 * contrast, these chunks must be added to the pmap upon allocation.
2180 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
2193 m = reclaim_pv_chunk(pmap, lockp);
2202 pc->pc_pmap = pmap;
2206 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2211 * The reclaim might have freed a chunk from the current pmap.
2226 * First find and then remove the pv entry for the specified pmap and virtual
2232 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2237 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2252 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2262 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2274 pv = pmap_pvh_remove(pvh, pmap, va);
2283 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2302 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2303 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2307 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2308 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2315 * First find and then destroy the pv entry for the specified pmap and virtual
2320 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2324 pv = pmap_pvh_remove(pvh, pmap, va);
2326 free_pv_entry(pmap, pv);
2334 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2339 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2341 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2355 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2362 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2363 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
2367 pmap_invalidate_page(pmap, va);
2369 pmap->pm_stats.wired_count -= 1;
2370 pmap_resident_count_dec(pmap, 1);
2378 pmap_pvh_free(&m->md, pmap, va);
2386 return (pmap_unuse_l3(pmap, va, l2e, free));
2396 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2407 if (pmap->pm_stats.resident_count == 0)
2412 PMAP_LOCK(pmap);
2417 if (pmap->pm_stats.resident_count == 0)
2420 l0 = pmap_l0(pmap, sva);
2451 if (pmap_demote_l2_locked(pmap, l2, sva & ~L2_OFFSET,
2478 pmap_invalidate_range(pmap, va, sva);
2485 if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free,
2492 pmap_invalidate_range(pmap, va, sva);
2496 PMAP_UNLOCK(pmap);
2518 pmap_t pmap;
2535 pmap = PV_PMAP(pv);
2536 if (!PMAP_TRYLOCK(pmap)) {
2539 PMAP_LOCK(pmap);
2543 PMAP_UNLOCK(pmap);
2548 pte = pmap_pte(pmap, va, &lvl);
2554 pmap_demote_l2_locked(pmap, pte, va, &lock);
2555 PMAP_UNLOCK(pmap);
2558 pmap = PV_PMAP(pv);
2559 if (!PMAP_TRYLOCK(pmap)) {
2563 PMAP_LOCK(pmap);
2567 PMAP_UNLOCK(pmap);
2571 pmap_resident_count_dec(pmap, 1);
2573 pde = pmap_pde(pmap, pv->pv_va, &lvl);
2582 if (pmap_is_current(pmap) &&
2587 pmap_invalidate_page(pmap, pv->pv_va);
2589 pmap->pm_stats.wired_count--;
2598 pmap_unuse_l3(pmap, pv->pv_va, tpde, &free);
2601 free_pv_entry(pmap, pv);
2602 PMAP_UNLOCK(pmap);
2614 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2622 pmap_remove(pmap, sva, eva);
2630 PMAP_LOCK(pmap);
2633 l0 = pmap_l0(pmap, sva);
2658 l3p = pmap_demote_l2(pmap, l2, sva);
2690 pmap_invalidate_page(pmap, sva);
2693 PMAP_UNLOCK(pmap);
2697 * Inserts the specified page table page into the specified pmap's collection
2698 * of idle page table pages. Each of a pmap's page table pages is responsible
2699 * for mapping a distinct range of virtual addresses. The pmap's collection is
2703 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
2706 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2707 return (vm_radix_insert(&pmap->pm_root, mpte));
2712 * specified pmap's collection of idle page table pages, and returns it.
2717 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
2720 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2721 return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
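
The radix tree behind pmap_insert_pt_page() and pmap_remove_pt_page() is keyed by pmap_l2_pindex(va), i.e. one key per 2MB of virtual address space. A minimal sketch of that keying, assuming the usual L2_SHIFT of 21 for a 4KB granule:

#include <stdint.h>

#define	L2_SHIFT	21

static uint64_t
l2_pindex(uint64_t va)
{
	/* One index per 2MB region; all VAs under one L2 entry share it. */
	return (va >> L2_SHIFT);
}
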
2725 * Performs a break-before-make update of a pmap entry. This is needed when
2730 pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
2735 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2748 pmap_invalidate_range(pmap, va, va + size);
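
pmap_update_entry() exists because ARMv8 requires a break-before-make sequence when changing a live mapping's output address or memory attributes: the old entry must be invalidated and its TLB entries flushed before the new entry is written. A sketch of that sequence, with illustrative stand-ins for the kernel's barrier and TLB-invalidation primitives:

#include <stdint.h>

/* Illustrative stand-ins for the kernel's dsb/isb/tlbi sequences. */
static void barrier_dsb(void) { }
static void barrier_isb(void) { }
static void tlb_invalidate_range(uint64_t va, uint64_t eva) { (void)va; (void)eva; }

static void
update_entry_sketch(volatile uint64_t *pte, uint64_t newpte,
    uint64_t va, uint64_t size)
{
	/* 1. Break: invalidate the old entry so no new TLB fills occur. */
	*pte = 0;
	barrier_dsb();

	/* 2. Invalidate any cached translations covering the range. */
	tlb_invalidate_range(va, va + size);

	/* 3. Make: publish the replacement entry. */
	*pte = newpte;
	barrier_dsb();
	barrier_isb();
}
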
2765 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2786 pv = pmap_pvh_remove(&m->md, pmap, va);
2796 pmap_pvh_free(&m->md, pmap, va);
2808 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2815 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2825 " in pmap %p", va, pmap);
2835 " in pmap %p", va, pmap);
2852 if (pmap_insert_pt_page(pmap, mpte)) {
2855 "pmap_promote_l2: failure for va %#lx in pmap %p", va,
2856 pmap);
2861 pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
2866 pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
2869 CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
2870 pmap);
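
pmap_promote_l2() may only replace 512 L3 entries with a single 2MB block entry when every one of them maps physically contiguous memory with identical attributes; that is what the checks behind the CTR2 failure messages above enforce. A sketch of the contiguity test, assuming the conventional split of attribute bits from the output address:

#include <stdbool.h>
#include <stdint.h>

#define	LN_ENTRIES	512
#define	L3_SIZE		(1ULL << 12)

static bool
promotable(const uint64_t *l3, uint64_t first)
{
	uint64_t expect;
	int i;

	/* "first" is l3[0]; each next entry must map the next 4KB frame
	 * with the same attribute bits. */
	expect = first;
	for (i = 0; i < LN_ENTRIES; i++) {
		if (l3[i] != expect)		/* PA or attributes differ */
			return (false);
		expect += L3_SIZE;
	}
	return (true);
}
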
2887 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2920 PMAP_LOCK(pmap);
2922 pde = pmap_pde(pmap, va, &lvl);
2926 (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET,
2940 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2945 PMAP_UNLOCK(pmap);
2948 pde = pmap_pde(pmap, va, &lvl);
2965 pde = pmap_l0(pmap, va);
3016 pmap_invalidate_page(pmap, va);
3036 pmap->pm_stats.wired_count++;
3039 pmap->pm_stats.wired_count--;
3069 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
3076 pmap->pm_stats.wired_count++;
3077 pmap_resident_count_inc(pmap, 1);
3084 pv = get_pv_entry(pmap, &lock);
3102 pmap_update_entry(pmap, l3, new_l3, va, PAGE_SIZE);
3110 pmap_pvh_free(&om->md, pmap, va);
3120 pmap_invalidate_page(pmap, va);
3130 pmap_invalidate_page(pmap, va);
3132 if (pmap != pmap_kernel()) {
3133 if (pmap == &curproc->p_vmspace->vm_pmap &&
3142 pmap_promote_l2(pmap, pde, va, &lock);
3149 PMAP_UNLOCK(pmap);
3166 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3180 PMAP_LOCK(pmap);
3183 mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock);
3188 PMAP_UNLOCK(pmap);
3193 * 1. Current pmap & pmap exists.
3201 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3206 PMAP_LOCK(pmap);
3207 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3210 PMAP_UNLOCK(pmap);
3214 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3226 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3228 CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3246 pde = pmap_pde(pmap, va, &lvl);
3269 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3299 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3302 if (pmap_unwire_l3(pmap, va, mpte, &free)) {
3303 pmap_invalidate_page(pmap, va);
3314 pmap_resident_count_inc(pmap, 1);
3330 pmap_invalidate_page(pmap, va);
3340 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3351 * addresses in the given pmap. Every valid mapping within that range
3359 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3365 PMAP_LOCK(pmap);
3367 l0 = pmap_l0(pmap, sva);
3392 l3 = pmap_demote_l2(pmap, l2, sva);
3410 * PG_W must be cleared atomically. Although the pmap
3415 pmap->pm_stats.wired_count--;
3418 PMAP_UNLOCK(pmap);
3544 * Returns true if the pmap's pv is one of the first
3551 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3565 if (PV_PMAP(pv) == pmap) {
3576 if (PV_PMAP(pv) == pmap) {
3600 pmap_t pmap;
3612 pmap = PV_PMAP(pv);
3613 if (!PMAP_TRYLOCK(pmap)) {
3616 PMAP_LOCK(pmap);
3619 PMAP_UNLOCK(pmap);
3623 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3626 PMAP_UNLOCK(pmap);
3631 pmap = PV_PMAP(pv);
3632 if (!PMAP_TRYLOCK(pmap)) {
3636 PMAP_LOCK(pmap);
3640 PMAP_UNLOCK(pmap);
3644 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3648 PMAP_UNLOCK(pmap);
3657 * pmap. This pmap cannot be active on any processor besides the
3660 * This function cannot be applied to the kernel pmap. Moreover, it
3664 * destroy mappings by iterating over the pmap's collection of PV
3672 pmap_remove_pages(pmap_t pmap)
3690 PMAP_LOCK(pmap);
3691 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
3703 pde = pmap_pde(pmap, pv->pv_va, &lvl);
3752 if (pmap_is_current(pmap)) {
3765 pmap_invalidate_page(pmap, pv->pv_va);
3789 pmap_resident_count_dec(pmap,
3800 ml3 = pmap_remove_pt_page(pmap,
3803 pmap_resident_count_dec(pmap, 1);

3814 pmap_resident_count_dec(pmap, 1);
3829 pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde),
3838 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3842 pmap_invalidate_all(pmap);
3845 PMAP_UNLOCK(pmap);
3861 pmap_t pmap;
3870 pmap = PV_PMAP(pv);
3871 if (!PMAP_TRYLOCK(pmap)) {
3874 PMAP_LOCK(pmap);
3877 PMAP_UNLOCK(pmap);
3881 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3895 PMAP_UNLOCK(pmap);
3902 pmap = PV_PMAP(pv);
3903 if (!PMAP_TRYLOCK(pmap)) {
3907 PMAP_LOCK(pmap);
3911 PMAP_UNLOCK(pmap);
3915 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3929 PMAP_UNLOCK(pmap);
3970 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3977 PMAP_LOCK(pmap);
3978 pte = pmap_pte(pmap, addr, &lvl);
3982 PMAP_UNLOCK(pmap);
4008 pmap_t pmap;
4032 pmap = PV_PMAP(pv);
4033 if (!PMAP_TRYLOCK(pmap)) {
4036 PMAP_LOCK(pmap);
4039 PMAP_UNLOCK(pmap);
4045 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4047 pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET,
4052 PMAP_UNLOCK(pmap);
4055 pmap = PV_PMAP(pv);
4056 if (!PMAP_TRYLOCK(pmap)) {
4060 PMAP_LOCK(pmap);
4064 PMAP_UNLOCK(pmap);
4069 pte = pmap_pte(pmap, pv->pv_va, &lvl);
4078 pmap_invalidate_page(pmap, pv->pv_va);
4080 PMAP_UNLOCK(pmap);
4087 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
4114 pmap_t pmap;
4141 pmap = PV_PMAP(pv);
4142 if (!PMAP_TRYLOCK(pmap)) {
4145 PMAP_LOCK(pmap);
4148 PMAP_UNLOCK(pmap);
4153 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4176 * and the pmap address to select one 4KB page out of
4190 (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4192 if (safe_to_clear_referenced(pmap, tpte)) {
4200 } else if (pmap_demote_l2_locked(pmap, pte,
4206 pmap_remove_l3(pmap, l3, va,
4228 PMAP_UNLOCK(pmap);
4245 pmap = PV_PMAP(pv);
4246 if (!PMAP_TRYLOCK(pmap)) {
4250 PMAP_LOCK(pmap);
4253 PMAP_UNLOCK(pmap);
4257 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4269 if (safe_to_clear_referenced(pmap, tpte)) {
4283 pmap_remove_l3(pmap, pte, pv->pv_va, tpde,
4285 pmap_invalidate_page(pmap, pv->pv_va);
4296 PMAP_UNLOCK(pmap);
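
For 2MB mappings, pmap_ts_referenced() avoids demoting every superpage by sampling: the superpage's address is mixed with the pmap pointer to pick one of the 512 constituent 4KB pages, as the Ln_ENTRIES - 1 mask above hints. A rough sketch of that selection; the exact mix of bits used in pmap.c may differ:

#include <stdint.h>

#define	LN_ENTRIES	512ULL
#define	L2_SHIFT	21

static int
sampled_page_index(uint64_t sva, uintptr_t pmap_addr)
{
	/* Cheap hash of the 2MB frame number and the pmap address, 0..511. */
	return ((int)(((sva >> L2_SHIFT) ^ pmap_addr) & (LN_ENTRIES - 1)));
}
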
4313 * given pmap. Depending on the advice, clear the referenced and/or
4317 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4496 pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
4504 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4523 " in pmap %p", va, pmap);
4551 pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
4565 pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
4574 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4589 if ((ml3 = pmap_remove_pt_page(pmap, va)) == NULL) {
4595 " in pmap %p", va, pmap);
4599 pmap_resident_count_inc(pmap, 1);
4643 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
4645 pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
4651 pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
4655 " in pmap %p %lx", va, pmap, l3[0]);
4668 pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
4674 l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
4681 * perform the pmap work for mincore
4684 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4691 PMAP_LOCK(pmap);
4694 pte = pmap_pte(pmap, addr, &lvl);
4728 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
4732 PMAP_UNLOCK(pmap);
4740 pmap_t pmap;
4743 pmap = vmspace_pmap(td->td_proc->p_vmspace);
4744 td->td_pcb->pcb_l0addr = vtophys(pmap->pm_l0);
4746 pmap_invalidate_all(pmap);
4751 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
4766 pa = pmap_extract(pmap, va);
4780 pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
4795 PMAP_LOCK(pmap);
4802 if (pmap == kernel_pmap)
4813 PMAP_UNLOCK(pmap);
4820 PMAP_UNLOCK(pmap);