Lines matching refs:pmap in sys/i386/i386/pmap.c (FreeBSD stable/11, r351449)
Each entry below is a source line number followed by the matching line; non-matching continuation lines are omitted, so some declarations and comments appear truncated.

43  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
78 __FBSDID("$FreeBSD: stable/11/sys/i386/i386/pmap.c 351449 2019-08-24 00:35:59Z jhb $");
195 struct pmap kernel_pmap_store;
196 LIST_HEAD(pmaplist, pmap);
215 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
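
The SYSCTL_NODE at 215 parents every vm.pmap sysctl in this file. A minimal sketch of hanging a read-only integer under that node; the variable name and description here are hypothetical, not from pmap.c:

    static int pmap_example_knob;	/* hypothetical, for illustration only */
    SYSCTL_INT(_vm_pmap, OID_AUTO, example_knob, CTLFLAG_RD,
        &pmap_example_knob, 0,
        "Hypothetical read-only value under the vm.pmap node");
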
295 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
296 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
297 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
298 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
300 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
302 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
303 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
307 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
308 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
310 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
313 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
314 static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
323 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
325 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
328 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
330 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
332 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
333 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
335 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
337 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
338 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
340 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
344 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
346 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
347 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
348 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
364 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
373 * and just syncs the pmap module with what has already been done.
410 * Initialize the kernel pmap (which is statically allocated).
426 rw_init(&pvh_global_lock, "pmap pv global");
784 * Initialize the pmap module.
785 * Called by vm_init, to initialize any structures that the pmap
797 * Initialize the vm page array entries for the kernel pmap's
822 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
824 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
846 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
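
The three TUNABLE_INT_FETCH calls above (822, 824, 846) read loader-set values during early initialization. A minimal sketch of the pattern, assuming a compiled-in default that the tunable may override; the helper function is hypothetical, as pmap_init() performs the fetch inline:

    static int pg_ps_enabled = 1;	/* compiled-in default */

    static void
    fetch_pmap_tunables(void)
    {
            /* A loader.conf line such as vm.pmap.pg_ps_enabled="0" overrides the default. */
            TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
    }
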
961 pmap_t pmap;
966 LIST_FOREACH(pmap, &allpmaps, pm_list) {
967 if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
970 pde = pmap_pde(pmap, va);
1040 * invalidated. This can happen one of two ways: (1) The pmap becomes
1043 * table is globally performed. (2) The pmap becomes active on another
1046 * pmap as inactive on the other processor.
1053 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1059 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
1066 if (CPU_ISSET(cpuid, &pmap->pm_active))
1068 CPU_AND(&other_cpus, &pmap->pm_active);
1071 smp_masked_invlpg(*mask, va, pmap);
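
Only the matching lines of the SMP variant of pmap_invalidate_page() (1053-1071) appear above; the UP inline variant is listed separately at 1242. An approximate reconstruction of the whole SMP function, following the logic of the fragments; treat it as a sketch rather than verbatim stable/11 source:

    void
    pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
    {
            cpuset_t *mask, other_cpus;
            u_int cpuid;

            sched_pin();
            if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
                    /* Every CPU may cache this mapping: flush locally, IPI all. */
                    invlpg(va);
                    mask = &all_cpus;
            } else {
                    cpuid = PCPU_GET(cpuid);
                    other_cpus = all_cpus;
                    CPU_CLR(cpuid, &other_cpus);
                    if (CPU_ISSET(cpuid, &pmap->pm_active))
                            invlpg(va);
                    /* IPI only CPUs on which this pmap is active. */
                    CPU_AND(&other_cpus, &pmap->pm_active);
                    mask = &other_cpus;
            }
            smp_masked_invlpg(*mask, va, pmap);
            sched_unpin();
    }
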
1079 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1086 pmap_invalidate_all(pmap);
1091 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
1099 if (CPU_ISSET(cpuid, &pmap->pm_active))
1102 CPU_AND(&other_cpus, &pmap->pm_active);
1105 smp_masked_invlpg_range(*mask, sva, eva, pmap);
1110 pmap_invalidate_all(pmap_t pmap)
1116 if (pmap == kernel_pmap) {
1119 } else if (!CPU_CMP(&pmap->pm_active, &all_cpus)) {
1126 if (CPU_ISSET(cpuid, &pmap->pm_active))
1128 CPU_AND(&other_cpus, &pmap->pm_active);
1131 smp_masked_invltlb(*mask, pmap);
1158 pmap_t pmap;
1167 LIST_FOREACH(pmap, &allpmaps, pm_list) {
1168 pde = pmap_pde(pmap, act->va);
1201 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1211 if (pmap == kernel_pmap)
1214 active = pmap->pm_active;
1223 smp_no_rendezvous_barrier, pmap == kernel_pmap ?
1227 if (pmap == kernel_pmap)
1239 * We inline these within pmap.c for speed.
1242 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
1245 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1250 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1254 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1260 pmap_invalidate_all(pmap_t pmap)
1263 if (pmap == kernel_pmap)
1265 else if (!CPU_EMPTY(&pmap->pm_active))
1277 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
1280 if (pmap == kernel_pmap)
1284 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
1290 pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1305 pmap_invalidate_range(pmap, va, va + NBPDR - 1);
1307 pmap_invalidate_page(pmap, va);
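
pmap_invalidate_pde_page() (1290-1307) chooses between flushing one TLB entry and flushing the whole 2/4MB range. The deciding bit is PG_PROMOTED: a PDE created by promotion may leave stale 4KB entries in the TLB alongside the large mapping. A sketch of the complete function, consistent with the two fragments above:

    static void
    pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
    {
            /*
             * A promoted mapping did not invalidate the 512 or 1024 4KB TLB
             * entries it replaced, so both page sizes may still be cached
             * for [va, va + NBPDR); flush the whole range in that case.
             */
            if ((pde & PG_PROMOTED) != 0)
                    pmap_invalidate_range(pmap, va, va + NBPDR - 1);
            else
                    pmap_invalidate_page(pmap, va);
    }
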
1394 pmap_is_current(pmap_t pmap)
1397 return (pmap == kernel_pmap || pmap ==
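
Line 1397 is cut off by the match; on stable/11 the comparison completes against the current process's pmap. A sketch, assuming the vmspace accessor:

    static __inline int
    pmap_is_current(pmap_t pmap)
    {
            return (pmap == kernel_pmap || pmap ==
                vmspace_pmap(curthread->td_proc->p_vmspace));
    }
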
1402 * If the given pmap is not the current or kernel pmap, the returned pte must
1406 pmap_pte(pmap_t pmap, vm_offset_t va)
1411 pde = pmap_pde(pmap, va);
1416 if (pmap_is_current(pmap))
1462 * If the given pmap is not the current pmap, pvh_global_lock
1466 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
1471 pde = pmap_pde(pmap, va);
1476 if (pmap_is_current(pmap))
1509 pmap_extract(pmap_t pmap, vm_offset_t va)
1516 PMAP_LOCK(pmap);
1517 pde = pmap->pm_pdir[va >> PDRSHIFT];
1522 pte = pmap_pte(pmap, va);
1527 PMAP_UNLOCK(pmap);
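
pmap_extract() (1509-1527) is shown only in part. An approximate reconstruction, assuming the usual PG_PS split implied by the PDE read at 1517; pmap_pte_release() pairs with the pmap_pte() call when the pmap is not the current one:

    vm_paddr_t
    pmap_extract(pmap_t pmap, vm_offset_t va)
    {
            vm_paddr_t rtval;
            pt_entry_t *pte;
            pd_entry_t pde;

            rtval = 0;
            PMAP_LOCK(pmap);
            pde = pmap->pm_pdir[va >> PDRSHIFT];
            if (pde != 0) {
                    if ((pde & PG_PS) != 0)
                            /* 2/4MB page: the PDE itself holds the frame. */
                            rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
                    else {
                            pte = pmap_pte(pmap, va);
                            rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
                            pmap_pte_release(pte);
                    }
            }
            PMAP_UNLOCK(pmap);
            return (rtval);
    }
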
1535 * with the given pmap and virtual address pair
1539 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1548 PMAP_LOCK(pmap);
1550 pde = *pmap_pde(pmap, va);
1554 if (vm_page_pa_tryrelock(pmap, (pde &
1560 ptep = pmap_pte(pmap, va);
1565 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
1575 PMAP_UNLOCK(pmap);
1764 * Inserts the specified page table page into the specified pmap's collection
1765 * of idle page table pages. Each of a pmap's page table pages is responsible
1766 * for mapping a distinct range of virtual addresses. The pmap's collection is
1770 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
1773 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1774 return (vm_radix_insert(&pmap->pm_root, mpte));
1779 * specified pmap's collection of idle page table pages, and returns it.
1784 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
1787 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1788 return (vm_radix_remove(&pmap->pm_root, va >> PDRSHIFT));
1798 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1803 _pmap_unwire_ptp(pmap, m, free);
1810 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
1817 pmap->pm_pdir[m->pindex] = 0;
1818 --pmap->pm_stats.resident_count;
1825 pmap_invalidate_page(pmap, pteva);
1839 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
1846 ptepde = *pmap_pde(pmap, va);
1848 return (pmap_unwire_ptp(pmap, mpte, free));
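
pmap_unuse_pt() (1839-1848) drops the page table page reference that backed a just-removed PTE; kernel addresses are skipped because kernel page table pages are never freed. A sketch of the complete function:

    static int
    pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
    {
            pd_entry_t ptepde;
            vm_page_t mpte;

            if (va >= VM_MAXUSER_ADDRESS)
                    return (0);
            ptepde = *pmap_pde(pmap, va);
            mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
            return (pmap_unwire_ptp(pmap, mpte, free));
    }
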
1852 * Initialize the pmap for the swapper process.
1855 pmap_pinit0(pmap_t pmap)
1858 PMAP_LOCK_INIT(pmap);
1860 * Since the page table directory is shared with the kernel pmap,
1861 * which is already included in the list "allpmaps", this pmap does
1864 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1866 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1868 pmap->pm_root.rt_root = 0;
1869 CPU_ZERO(&pmap->pm_active);
1870 TAILQ_INIT(&pmap->pm_pvchunk);
1871 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1872 pmap_activate_boot(pmap);
1876 * Initialize a preallocated and zeroed pmap structure,
1880 pmap_pinit(pmap_t pmap)
1890 if (pmap->pm_pdir == NULL) {
1891 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
1892 if (pmap->pm_pdir == NULL)
1895 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
1896 KASSERT(((vm_offset_t)pmap->pm_pdpt &
1899 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
1902 pmap->pm_root.rt_root = 0;
1904 KASSERT(vm_radix_is_empty(&pmap->pm_root),
1905 ("pmap_pinit: pmap has reserved page table page(s)"));
1920 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1924 pagezero(pmap->pm_pdir + (i * NPDEPG));
1927 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1929 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1935 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
1937 pmap->pm_pdpt[i] = pa | PG_V;
1941 CPU_ZERO(&pmap->pm_active);
1942 TAILQ_INIT(&pmap->pm_pvchunk);
1943 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1953 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
1964 PMAP_UNLOCK(pmap);
1968 PMAP_LOCK(pmap);
1985 pmap->pm_stats.resident_count++;
1988 pmap->pm_pdir[ptepindex] =
1995 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
2009 ptepa = pmap->pm_pdir[ptepindex];
2016 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
2017 ptepa = pmap->pm_pdir[ptepindex];
2032 m = _pmap_allocpte(pmap, ptepindex, flags);
2046 * Called when a pmap initialized by pmap_pinit is being released.
2050 pmap_release(pmap_t pmap)
2055 KASSERT(pmap->pm_stats.resident_count == 0,
2056 ("pmap_release: pmap resident count %ld != 0",
2057 pmap->pm_stats.resident_count));
2058 KASSERT(vm_radix_is_empty(&pmap->pm_root),
2059 ("pmap_release: pmap has reserved page table page(s)"));
2060 KASSERT(CPU_EMPTY(&pmap->pm_active),
2061 ("releasing active pmap %p", pmap));
2064 LIST_REMOVE(pmap, pm_list);
2068 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
2071 bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
2072 sizeof(*pmap->pm_pdir));
2074 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
2079 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
2221 pmap_t pmap;
2231 pmap = NULL;
2238 if (pmap != pc->pc_pmap) {
2239 if (pmap != NULL) {
2240 pmap_invalidate_all(pmap);
2241 if (pmap != locked_pmap)
2242 PMAP_UNLOCK(pmap);
2244 pmap = pc->pc_pmap;
2246 if (pmap > locked_pmap)
2247 PMAP_LOCK(pmap);
2248 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2249 pmap = NULL;
2265 pde = pmap_pde(pmap, va);
2268 pte = pmap_pte(pmap, va);
2276 ("pmap_pv_reclaim: pmap %p va %x zero pte",
2277 pmap, va));
2279 pmap_invalidate_page(pmap, va);
2295 pmap_unuse_pt(pmap, va, &free);
2304 pmap->pm_stats.resident_count -= freed;
2308 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2311 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2319 if (pmap == locked_pmap)
2336 if (pmap != NULL) {
2337 pmap_invalidate_all(pmap);
2338 if (pmap != locked_pmap)
2339 PMAP_UNLOCK(pmap);
2355 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2361 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2376 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2378 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2379 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2384 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2410 get_pv_entry(pmap_t pmap, boolean_t try)
2420 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2426 "increasing either the vm.pmap.shpgperproc or the "
2427 "vm.pmap.pv_entries tunable.\n");
2429 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2446 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2447 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2464 m = pmap_pv_reclaim(pmap);
2472 pc->pc_pmap = pmap;
2478 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
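
The ratelimited console message at 2426-2427 names the two tunables an administrator can raise when pv entries run out. Illustrative loader.conf settings; the values are hypothetical placeholders, not recommendations:

    # /boot/loader.conf
    vm.pmap.shpgperproc="400"
    vm.pmap.pv_entries="4000000"
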
2484 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2490 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2499 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2516 pv = pmap_pvh_remove(pvh, pmap, va);
2527 pmap_insert_entry(pmap, va, m);
2533 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2553 pv = pmap_pvh_remove(&m->md, pmap, va);
2562 pmap_pvh_free(&m->md, pmap, va);
2568 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2572 pv = pmap_pvh_remove(pvh, pmap, va);
2574 free_pv_entry(pmap, pv);
2578 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2583 pmap_pvh_free(&m->md, pmap, va);
2593 * (pmap, va).
2596 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2601 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2602 pv = get_pv_entry(pmap, FALSE);
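
pmap_insert_entry() (2596-2602) is the non-failing variant: get_pv_entry() is called with try == FALSE, so it may reclaim pv chunks but never returns NULL. A sketch of the full function, assuming the pv_next linkage used elsewhere in this file:

    static void
    pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
    {
            pv_entry_t pv;

            rw_assert(&pvh_global_lock, RA_WLOCKED);
            PMAP_LOCK_ASSERT(pmap, MA_OWNED);
            pv = get_pv_entry(pmap, FALSE);
            pv->pv_va = va;
            TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
    }
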
2611 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2616 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2618 (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2630 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2637 (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2665 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2674 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2678 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
2694 pmap_remove_pde(pmap, pde, sva, &free);
2696 pmap_invalidate_pde_page(pmap, sva, oldpde);
2699 " in pmap %p", va, pmap);
2703 pmap->pm_stats.resident_count++;
2769 * Demote the mapping. This pmap is locked. The old PDE has
2776 pmap_update_pde(pmap, va, pde, newpde);
2777 else if (pmap == kernel_pmap)
2787 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2799 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
2803 " in pmap %p", va, pmap);
2808 * Removes a 2- or 4MB page mapping from the kernel pmap.
2811 pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2817 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2818 mpte = pmap_remove_pt_page(pmap, va);
2834 pmap_update_pde(pmap, va, pde, newpde);
2841 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2848 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
2856 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2861 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
2870 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2873 pmap_pvh_free(pvh, pmap, sva);
2886 if (pmap == kernel_pmap) {
2887 pmap_remove_kernel_pde(pmap, pdq, sva);
2889 mpte = pmap_remove_pt_page(pmap, sva);
2891 pmap->pm_stats.resident_count--;
2904 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
2911 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2914 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
2916 pmap->pm_stats.wired_count -= 1;
2923 pmap->pm_stats.resident_count -= 1;
2930 pmap_remove_entry(pmap, m, va);
2932 return (pmap_unuse_pt(pmap, va, free));
2939 pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
2945 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2946 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
2948 pmap_remove_pte(pmap, pte, va, free);
2949 pmap_invalidate_page(pmap, va);
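
pmap_remove_page() (2939-2949) appears nearly whole above; the only missing piece is the early return when the PTE is absent. A sketch assembling the fragments:

    static void
    pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
    {
            pt_entry_t *pte;

            PMAP_LOCK_ASSERT(pmap, MA_OWNED);
            /* Nothing mapped here: no PTE page, or a zero PTE. */
            if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
                    return;
            pmap_remove_pte(pmap, pte, va, free);
            pmap_invalidate_page(pmap, va);
    }
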
2959 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2970 if (pmap->pm_stats.resident_count == 0)
2978 PMAP_LOCK(pmap);
2986 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
2987 pmap_remove_page(pmap, sva, &free);
3000 if (pmap->pm_stats.resident_count == 0)
3004 ptpaddr = pmap->pm_pdir[pdirindex];
3028 pmap_remove_pde(pmap,
3029 &pmap->pm_pdir[pdirindex], sva, &free);
3031 } else if (!pmap_demote_pde(pmap,
3032 &pmap->pm_pdir[pdirindex], sva)) {
3046 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3057 if (pmap_remove_pte(pmap, pte, sva, &free))
3064 pmap_invalidate_all(pmap);
3066 PMAP_UNLOCK(pmap);
3088 pmap_t pmap;
3104 pmap = PV_PMAP(pv);
3105 PMAP_LOCK(pmap);
3106 pde = pmap_pde(pmap, va);
3107 (void)pmap_demote_pde(pmap, pde, va);
3108 PMAP_UNLOCK(pmap);
3112 pmap = PV_PMAP(pv);
3113 PMAP_LOCK(pmap);
3114 pmap->pm_stats.resident_count--;
3115 pde = pmap_pde(pmap, pv->pv_va);
3118 pte = pmap_pte_quick(pmap, pv->pv_va);
3120 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
3121 pmap, pv->pv_va));
3123 pmap->pm_stats.wired_count--;
3132 pmap_unuse_pt(pmap, pv->pv_va, &free);
3133 pmap_invalidate_page(pmap, pv->pv_va);
3135 free_pv_entry(pmap, pv);
3136 PMAP_UNLOCK(pmap);
3148 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
3155 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3195 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3204 pmap_remove(pmap, sva, eva);
3217 if (pmap_is_current(pmap))
3227 PMAP_LOCK(pmap);
3237 ptpaddr = pmap->pm_pdir[pdirindex];
3259 if (pmap_protect_pde(pmap,
3260 &pmap->pm_pdir[pdirindex], sva, prot))
3269 pmap);
3270 PMAP_UNLOCK(pmap);
3275 if (!pmap_demote_pde(pmap,
3276 &pmap->pm_pdir[pdirindex], sva)) {
3289 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3326 pmap_invalidate_page(pmap, sva);
3333 pmap_invalidate_all(pmap);
3338 PMAP_UNLOCK(pmap);
3350 * promoted. The reason is that kernel PDEs are replicated in each pmap but
3352 * pmap.
3355 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
3362 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3369 firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
3375 " in pmap %p", va, pmap);
3378 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
3381 " in pmap %p", va, pmap);
3407 " in pmap %p", va, pmap);
3422 " in pmap %p", oldpteva, pmap);
3427 " in pmap %p", va, pmap);
3444 if (pmap_insert_pt_page(pmap, mpte)) {
3447 "pmap_promote_pde: failure for va %#x in pmap %p", va,
3448 pmap);
3456 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
3468 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
3469 else if (pmap == kernel_pmap)
3476 " in pmap %p", va, pmap);
3493 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3516 PMAP_LOCK(pmap);
3519 pde = pmap_pde(pmap, va);
3527 mpte = pmap_allocpte(pmap, va, flags);
3533 PMAP_UNLOCK(pmap);
3544 (uintmax_t)pmap->pm_pdir[PTDPTDI]));
3546 pmap_demote_pde(pmap, pde, va);
3548 pte = pmap_pte_quick(pmap, va);
3557 (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
3576 pmap->pm_stats.wired_count++;
3578 pmap->pm_stats.wired_count--;
3601 pmap->pm_stats.wired_count--;
3604 pv = pmap_pvh_remove(&om->md, pmap, va);
3613 pmap->pm_stats.resident_count++;
3622 pv = get_pv_entry(pmap, FALSE);
3627 free_pv_entry(pmap, pv);
3633 pmap->pm_stats.wired_count++;
3653 if (pmap == kernel_pmap)
3690 pmap_invalidate_page(pmap, va);
3703 pmap_promote_pde(pmap, pde, va);
3708 PMAP_UNLOCK(pmap);
3719 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3724 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3725 pde = pmap_pde(pmap, va);
3728 " in pmap %p", va, pmap);
3739 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
3741 " in pmap %p", va, pmap);
3755 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3765 " in pmap %p", va, pmap);
3782 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3795 PMAP_LOCK(pmap);
3800 pmap_enter_pde(pmap, va, m, prot))
3803 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
3808 PMAP_UNLOCK(pmap);
3813 * 1. Current pmap & pmap exists.
3821 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3825 PMAP_LOCK(pmap);
3826 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
3828 PMAP_UNLOCK(pmap);
3832 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3842 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3862 ptepa = pmap->pm_pdir[ptepindex];
3874 mpte = _pmap_allocpte(pmap, ptepindex,
3886 * entering the page into the current pmap. In order to support
3887 * quick entry into any pmap, one would likely use pmap_pte_quick.
3903 !pmap_try_insert_pv_entry(pmap, va, m)) {
3906 if (pmap_unwire_ptp(pmap, mpte, &free)) {
3907 pmap_invalidate_page(pmap, va);
3919 pmap->pm_stats.resident_count++;
3929 if (pmap != kernel_pmap)
3956 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4005 PMAP_LOCK(pmap);
4008 pde = pmap_pde(pmap, addr);
4012 pmap->pm_stats.resident_count += NBPDR /
4019 PMAP_UNLOCK(pmap);
4025 * addresses in the given pmap. Every valid mapping within that range
4033 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
4040 if (pmap_is_current(pmap))
4048 PMAP_LOCK(pmap);
4053 pde = pmap_pde(pmap, sva);
4072 pmap->pm_stats.wired_count -= NBPDR /
4079 PMAP_UNLOCK(pmap);
4085 if (!pmap_demote_pde(pmap, pde, sva))
4091 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
4100 * PG_W must be cleared atomically. Although the pmap
4107 pmap->pm_stats.wired_count--;
4114 PMAP_UNLOCK(pmap);
4418 * Returns true if the pmap's pv is one of the first
4425 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4437 if (PV_PMAP(pv) == pmap) {
4448 if (PV_PMAP(pv) == pmap) {
4493 pmap_t pmap;
4500 pmap = PV_PMAP(pv);
4501 PMAP_LOCK(pmap);
4502 pte = pmap_pte_quick(pmap, pv->pv_va);
4505 PMAP_UNLOCK(pmap);
4539 pmap_remove_pages(pmap_t pmap)
4552 if (pmap != PCPU_GET(curpmap)) {
4553 printf("warning: pmap_remove_pages called with non-current pmap\n");
4558 PMAP_LOCK(pmap);
4560 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4561 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
4573 pte = pmap_pde(pmap, pv->pv_va);
4625 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4633 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
4635 pmap->pm_stats.resident_count--;
4642 pmap->pm_stats.resident_count--;
4650 pmap_unuse_pt(pmap, pv->pv_va, &free);
4655 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4660 pmap_invalidate_all(pmap);
4662 PMAP_UNLOCK(pmap);
4706 pmap_t pmap;
4713 pmap = PV_PMAP(pv);
4714 PMAP_LOCK(pmap);
4715 pte = pmap_pte_quick(pmap, pv->pv_va);
4717 PMAP_UNLOCK(pmap);
4732 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4739 PMAP_LOCK(pmap);
4740 pde = pmap_pde(pmap, addr);
4745 PMAP_UNLOCK(pmap);
4779 pmap_t pmap;
4786 pmap = PV_PMAP(pv);
4787 PMAP_LOCK(pmap);
4788 pte = pmap_pte_quick(pmap, pv->pv_va);
4790 PMAP_UNLOCK(pmap);
4806 pmap_t pmap;
4829 pmap = PV_PMAP(pv);
4830 PMAP_LOCK(pmap);
4831 pde = pmap_pde(pmap, va);
4833 (void)pmap_demote_pde(pmap, pde, va);
4834 PMAP_UNLOCK(pmap);
4838 pmap = PV_PMAP(pv);
4839 PMAP_LOCK(pmap);
4840 pde = pmap_pde(pmap, pv->pv_va);
4843 pte = pmap_pte_quick(pmap, pv->pv_va);
4857 pmap_invalidate_page(pmap, pv->pv_va);
4859 PMAP_UNLOCK(pmap);
4887 pmap_t pmap;
4904 pmap = PV_PMAP(pv);
4905 PMAP_LOCK(pmap);
4906 pde = pmap_pde(pmap, pv->pv_va);
4921 * number, and the pmap address to select one 4KB page
4935 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
4938 pmap_invalidate_page(pmap, pv->pv_va);
4942 PMAP_UNLOCK(pmap);
4956 pmap = PV_PMAP(pv);
4957 PMAP_LOCK(pmap);
4958 pde = pmap_pde(pmap, pv->pv_va);
4962 pte = pmap_pte_quick(pmap, pv->pv_va);
4967 pmap_invalidate_page(pmap, pv->pv_va);
4970 PMAP_UNLOCK(pmap);
4986 * given pmap. Depending on the advice, clear the referenced and/or
4990 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
5000 if (pmap_is_current(pmap))
5009 PMAP_LOCK(pmap);
5014 pde = pmap_pde(pmap, sva);
5025 pmap_invalidate_all(pmap);
5026 PMAP_UNLOCK(pmap);
5031 if (!pmap_demote_pde(pmap, pde, sva)) {
5046 pte = pmap_pte_quick(pmap, sva);
5049 pmap_remove_pte(pmap, pte, sva, NULL);
5056 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
5083 pmap_invalidate_range(pmap, va, sva);
5088 pmap_invalidate_range(pmap, va, sva);
5091 pmap_invalidate_all(pmap);
5096 PMAP_UNLOCK(pmap);
5107 pmap_t pmap;
5132 pmap = PV_PMAP(pv);
5133 PMAP_LOCK(pmap);
5134 pde = pmap_pde(pmap, va);
5137 if (pmap_demote_pde(pmap, pde, va)) {
5146 pte = pmap_pte_quick(pmap, va);
5159 pmap_invalidate_page(pmap, va);
5164 PMAP_UNLOCK(pmap);
5168 pmap = PV_PMAP(pv);
5169 PMAP_LOCK(pmap);
5170 pde = pmap_pde(pmap, pv->pv_va);
5173 pte = pmap_pte_quick(pmap, pv->pv_va);
5181 pmap_invalidate_page(pmap, pv->pv_va);
5183 PMAP_UNLOCK(pmap);
5544 * perform the pmap work for mincore
5547 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5554 PMAP_LOCK(pmap);
5556 pdep = pmap_pde(pmap, addr);
5565 ptep = pmap_pte(pmap, addr);
5587 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
5591 PMAP_UNLOCK(pmap);
5598 pmap_t pmap, oldpmap;
5603 pmap = vmspace_pmap(td->td_proc->p_vmspace);
5608 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
5611 CPU_SET(cpuid, &pmap->pm_active);
5614 cr3 = vtophys(pmap->pm_pdpt);
5616 cr3 = vtophys(pmap->pm_pdir);
5623 PCPU_SET(curpmap, pmap);
5628 pmap_activate_boot(pmap_t pmap)
5634 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
5636 CPU_SET(cpuid, &pmap->pm_active);
5638 PCPU_SET(curpmap, pmap);
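
pmap_activate_boot() (5628-5638) differs from pmap_activate() in skipping the %cr3 load: the boot processor is already running on these page tables, so only the bookkeeping is needed. A sketch assembling the fragments, with the SMP/UP split made explicit:

    void
    pmap_activate_boot(pmap_t pmap)
    {
            u_int cpuid;

            cpuid = PCPU_GET(cpuid);
    #ifdef SMP
            CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
    #else
            CPU_SET(cpuid, &pmap->pm_active);
    #endif
            PCPU_SET(curpmap, pmap);
    }
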
5707 pmap_t pmap;
5720 pmap = vmspace_pmap(p->p_vmspace);
5726 pde = &pmap->pm_pdir[i];
5738 pte = pmap_pte(pmap, va);