Lines Matching refs:pmap

43  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
201 struct pmap kernel_pmap_store;
202 LIST_HEAD(pmaplist, pmap);
219 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
224 * This lock is defined as static in other pmap implementations. It cannot,
282 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
283 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
284 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
285 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
288 static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va,
292 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
294 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
296 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
298 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
301 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
303 static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
304 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
305 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
308 static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
317 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
323 pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
325 vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);
356 * and just syncs the pmap module with what has already been done.
382 * Initialize the kernel pmap (which is statically allocated).
395 rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
482 mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
603 * Initialize the pmap module.
604 * Called by vm_init, to initialize any structures that the pmap
616 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
618 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
736 * invalidated. This can happen one of two ways: (1) The pmap becomes
739 * table is globally performed. (2) The pmap becomes active on another
742 * pmap as inactive on the other processor.
749 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
754 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
755 pmap, va);
758 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
765 if (CPU_ISSET(cpuid, &pmap->pm_active))
767 CPU_AND(&other_cpus, &pmap->pm_active);
776 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
782 	CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
783 	    pmap, sva, eva);
786 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
794 if (CPU_ISSET(cpuid, &pmap->pm_active))
797 CPU_AND(&other_cpus, &pmap->pm_active);
806 pmap_invalidate_all(pmap_t pmap)
811 	CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
814 if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
821 if (CPU_ISSET(cpuid, &pmap->pm_active))
823 CPU_AND(&other_cpus, &pmap->pm_active);
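
The SMP variants above decide, per call, whether to flush the local TLB only, a subset of CPUs, or every CPU. A minimal reconstruction of that dispatch pattern from the fragments (assuming the FreeBSD 10-era cpuset and smp_masked_invlpg() interfaces; Xen-specific pending-update flushes are omitted):

	static void
	pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
	{
		cpuset_t other_cpus;
		u_int cpuid;

		sched_pin();			/* stay on this CPU for the duration */
		if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
			invlpg(va);		/* active everywhere: flush locally... */
			smp_invlpg(va);		/* ...and IPI all other CPUs */
		} else {
			cpuid = PCPU_GET(cpuid);
			other_cpus = all_cpus;
			CPU_CLR(cpuid, &other_cpus);
			if (CPU_ISSET(cpuid, &pmap->pm_active))
				invlpg(va);	/* flush locally only if active here */
			CPU_AND(&other_cpus, &pmap->pm_active);
			if (!CPU_EMPTY(&other_cpus))
				smp_masked_invlpg(other_cpus, va);
		}
		sched_unpin();
	}

The uniprocessor versions that follow reduce to the same test: flush if the pmap is the kernel pmap or is marked active anywhere.
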
842 * We inline these within pmap.c for speed.
845 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
847 CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
848 pmap, va);
850 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
856 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
861 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
862 pmap, sva, eva);
864 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
871 pmap_invalidate_all(pmap_t pmap)
874 CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
876 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
946 * a pmap's page table is in use because a kernel thread is borrowing
951 pmap_is_current(pmap_t pmap)
954 return (pmap == kernel_pmap ||
955 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
956 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
960 * If the given pmap is not the current or kernel pmap, the returned pte must
964 pmap_pte(pmap_t pmap, vm_offset_t va)
969 pde = pmap_pde(pmap, va);
974 if (pmap_is_current(pmap))
980 CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
981 pmap, va, (*PMAP2 & 0xffffffff));
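
The comment fragment above alludes to the PMAP2/PADDR2 mapping window: when the target pmap is neither current nor the kernel pmap, pmap_pte() maps the page-table page at a reserved kernel VA, and the caller must release it. A hedged usage sketch (pmap_pte_release() is the companion routine in the native i386 pmap; assuming it behaves the same in this Xen port):

	pt_entry_t *pte;
	pt_entry_t val;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);	/* may borrow the PMAP2 window */
	if (pte != NULL) {
		val = *pte;		/* read while the window is valid */
		pmap_pte_release(pte);	/* undo the PMAP2 mapping, if any */
	}
	PMAP_UNLOCK(pmap);
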
1021 * If the given pmap is not the current pmap, pvh_global_lock
1025 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
1030 pde = pmap_pde(pmap, va);
1035 if (pmap_is_current(pmap))
1042 CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
1043 pmap, va, (u_long)*PMAP1);
1070 pmap_extract(pmap_t pmap, vm_offset_t va)
1078 PMAP_LOCK(pmap);
1079 pde = pmap->pm_pdir[va >> PDRSHIFT];
1083 PMAP_UNLOCK(pmap);
1086 pte = pmap_pte(pmap, va);
1091 PMAP_UNLOCK(pmap);
1101 pmap_extract_ma(pmap_t pmap, vm_offset_t va)
1108 PMAP_LOCK(pmap);
1109 pde = pmap->pm_pdir[va >> PDRSHIFT];
1113 PMAP_UNLOCK(pmap);
1116 pte = pmap_pte(pmap, va);
1120 PMAP_UNLOCK(pmap);
1128 * with the given pmap and virtual address pair
1132 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1141 PMAP_LOCK(pmap);
1143 pde = PT_GET(pmap_pde(pmap, va));
1147 if (vm_page_pa_tryrelock(pmap, (pde &
1155 ptep = pmap_pte(pmap, va);
1160 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
1169 PMAP_UNLOCK(pmap);
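
pmap_extract_and_hold() resolves a virtual address to a vm_page and holds it so it cannot be freed while the caller inspects it; the vm_page_pa_tryrelock() calls in the fragments retry the lookup whenever the page lock must be dropped. A minimal caller sketch (assuming the era's vm_page_unhold() release primitive):

	vm_page_t m;
	vm_paddr_t pa;

	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
	if (m != NULL) {
		pa = VM_PAGE_TO_PHYS(m);	/* stable while the hold is in place */
		vm_page_unhold(m);		/* drop the hold when finished */
	}
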
1354 pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
1359 _pmap_unwire_ptp(pmap, m, free);
1366 _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
1374 xen_pt_unpin(pmap->pm_pdir[m->pindex]);
1378 PD_CLEAR_VA(pmap, m->pindex, TRUE);
1380 --pmap->pm_stats.resident_count;
1394 pmap_invalidate_page(pmap, pteva);
1409 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
1416 ptepde = PT_GET(pmap_pde(pmap, va));
1418 return (pmap_unwire_ptp(pmap, mpte, free));
1422 * Initialize the pmap for the swapper process.
1425 pmap_pinit0(pmap_t pmap)
1428 PMAP_LOCK_INIT(pmap);
1430 * Since the page table directory is shared with the kernel pmap,
1431 * which is already included in the list "allpmaps", this pmap does
1434 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1436 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1438 CPU_ZERO(&pmap->pm_active);
1439 PCPU_SET(curpmap, pmap);
1440 TAILQ_INIT(&pmap->pm_pvchunk);
1441 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1445 * Initialize a preallocated and zeroed pmap structure,
1449 pmap_pinit(pmap_t pmap)
1463 if (pmap->pm_pdir == NULL) {
1464 pmap->pm_pdir = (pd_entry_t *)kva_alloc(NBPTD);
1465 if (pmap->pm_pdir == NULL) {
1472 pmap->pm_pdpt = (pdpt_entry_t *)kva_alloc(1);
1489 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1493 pagezero(pmap->pm_pdir + (i * NPDEPG));
1496 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1498 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1502 pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1);
1504 bzero(pmap->pm_pdpt, PAGE_SIZE);
1509 pmap->pm_pdpt[i] = ma | PG_V;
1518 pd = pmap->pm_pdir + (i * NPDEPG);
1526 PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW);
1533 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
1537 CPU_ZERO(&pmap->pm_active);
1538 TAILQ_INIT(&pmap->pm_pvchunk);
1539 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1552 _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
1563 PMAP_UNLOCK(pmap);
1567 PMAP_LOCK(pmap);
1584 pmap->pm_stats.resident_count++;
1588 PT_SET_VA_MA(&pmap->pm_pdir[ptepindex],
1591 KASSERT(pmap->pm_pdir[ptepindex],
1597 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
1611 ptema = pmap->pm_pdir[ptepindex];
1621 pmap->pm_pdir[ptepindex] = 0;
1623 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1639 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
1640 pmap, va, flags);
1641 m = _pmap_allocpte(pmap, ptepindex, flags);
1645 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
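
pmap_allocpte() either takes another reference on an existing page-table page or calls _pmap_allocpte(), which may sleep (dropping the pmap lock, as the PMAP_UNLOCK/PMAP_LOCK pair in _pmap_allocpte shows) and therefore forces a retry. A condensed sketch of that loop, assuming the era's M_WAITOK flag convention and the Xen machine-to-physical helper xpmap_mtop():

	retry:
		ptema = pmap->pm_pdir[ptepindex];
		if ((ptema & PG_V) != 0) {
			/* PT page exists: translate its machine address, re-wire it. */
			m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
			m->wire_count++;
		} else {
			/* May sleep and drop the lock; redo the lookup if allowed. */
			m = _pmap_allocpte(pmap, ptepindex, flags);
			if (m == NULL && (flags & M_WAITOK))
				goto retry;
		}
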
1657 * Deal with a SMP shootdown of other users of the pmap that we are
1690 pmap_lazyfix(pmap_t pmap)
1696 mask = pmap->pm_active;
1707 lazyptd = vtophys(pmap->pm_pdpt);
1709 lazyptd = vtophys(pmap->pm_pdir);
1716 lazymask = &pmap->pm_active;
1720 (u_int)&pmap->pm_active);
1732 mask = pmap->pm_active;
1745 pmap_lazyfix(pmap_t pmap)
1749 cr3 = vtophys(pmap->pm_pdir);
1752 CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
1759 * Called when a pmap initialized by pmap_pinit is being released.
1763 pmap_release(pmap_t pmap)
1774 KASSERT(pmap->pm_stats.resident_count == 0,
1775 ("pmap_release: pmap resident count %ld != 0",
1776 pmap->pm_stats.resident_count));
1783 pmap_lazyfix(pmap);
1785 LIST_REMOVE(pmap, pm_list);
1789 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
1790 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
1792 ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
1807 KASSERT(VM_PAGE_TO_MACH(m) == (pmap->pm_pdpt[i] & PG_FRAME),
1815 pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1);
1850 struct pmap *pmap;
1896 LIST_FOREACH(pmap, &allpmaps, pm_list)
1897 PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);
1974 pmap_t pmap;
1983 pmap = NULL;
1989 if (pmap != pc->pc_pmap) {
1990 if (pmap != NULL) {
1991 pmap_invalidate_all(pmap);
1992 if (pmap != locked_pmap)
1993 PMAP_UNLOCK(pmap);
1995 pmap = pc->pc_pmap;
1997 if (pmap > locked_pmap)
1998 PMAP_LOCK(pmap);
1999 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
2000 pmap = NULL;
2016 pte = pmap_pte(pmap, va);
2024 ("pmap_pv_reclaim: pmap %p va %x zero pte",
2025 pmap, va));
2027 pmap_invalidate_page(pmap, va);
2037 pmap_unuse_pt(pmap, va, &free);
2046 pmap->pm_stats.resident_count -= freed;
2050 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2053 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2061 if (pmap == locked_pmap)
2078 if (pmap != NULL) {
2079 pmap_invalidate_all(pmap);
2080 if (pmap != locked_pmap)
2081 PMAP_UNLOCK(pmap);
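
The lock dance at source lines 1997-2000 avoids deadlock while pmap_pv_reclaim() iterates chunks owned by arbitrary pmaps: pmap locks are acquired in address order, and an out-of-order acquisition is only ever attempted, never waited on. Restated with comments:

	if (pmap > locked_pmap) {
		/* Address order preserved: safe to block on the lock. */
		PMAP_LOCK(pmap);
	} else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
		/* Would invert the order; skip this chunk rather than deadlock. */
		pmap = NULL;
		continue;
	}
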
2098 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2104 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2119 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
2121 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2122 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
2127 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2153 get_pv_entry(pmap_t pmap, boolean_t try)
2162 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2169 "increasing either the vm.pmap.shpgperproc or the "
2170 "vm.pmap.pv_entry_max tunable.\n");
2172 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2189 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2190 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2207 m = pmap_pv_reclaim(pmap);
2217 pc->pc_pmap = pmap;
2223 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
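
get_pv_entry() and free_pv_entry() above allocate pv entries in page-sized chunks rather than individually. A sketch of the chunk layout implied by the pc_pmap/pc_list/pc_map fields in the fragments (the i386 constants _NPCM and _NPCPV size the bitmap and entry array so one chunk fills a page; treat the exact layout as an assumption):

	struct pv_chunk {
		pmap_t		pc_pmap;		/* owning pmap */
		TAILQ_ENTRY(pv_chunk) pc_list;		/* per-pmap chunk list */
		uint32_t	pc_map[_NPCM];		/* free bitmap: 1 = slot free */
		TAILQ_ENTRY(pv_chunk) pc_lru;		/* global reclaim (LRU) list */
		struct pv_entry	pc_pventry[_NPCPV];	/* the pv entries themselves */
	};

Allocation finds the first set bit in pc_map and clears it; freeing sets the bit back, and a chunk whose bitmap becomes entirely free is returned to the VM, which is why free_pv_entry() moves chunks with free slots to the head of pm_pvchunk.
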
2229 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2235 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2244 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2248 pv = pmap_pvh_remove(pvh, pmap, va);
2250 free_pv_entry(pmap, pv);
2254 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2258 pmap_pvh_free(&m->md, pmap, va);
2267 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2271 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2274 (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2286 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
2291 CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x",
2292 pmap, (u_long)*ptq, va);
2295 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2299 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
2301 pmap->pm_stats.wired_count -= 1;
2308 pmap->pm_stats.resident_count -= 1;
2315 pmap_remove_entry(pmap, m, va);
2317 return (pmap_unuse_pt(pmap, va, free));
2324 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
2328 CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x",
2329 pmap, va);
2333 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2334 if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0)
2336 pmap_remove_pte(pmap, pte, va, free);
2337 pmap_invalidate_page(pmap, va);
2350 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2358 CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x",
2359 pmap, sva, eva);
2364 if (pmap->pm_stats.resident_count == 0)
2371 PMAP_LOCK(pmap);
2379 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
2380 pmap_remove_page(pmap, sva, &free);
2393 if (pmap->pm_stats.resident_count == 0)
2397 ptpaddr = pmap->pm_pdir[pdirindex];
2410 PD_CLEAR_VA(pmap, pdirindex, TRUE);
2411 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2424 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2435 if (pmap_remove_pte(pmap, pte, sva, &free))
2444 pmap_invalidate_all(pmap);
2447 PMAP_UNLOCK(pmap);
2468 pmap_t pmap;
2478 pmap = PV_PMAP(pv);
2479 PMAP_LOCK(pmap);
2480 pmap->pm_stats.resident_count--;
2481 pte = pmap_pte_quick(pmap, pv->pv_va);
2484 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
2485 pmap, pv->pv_va));
2487 pmap->pm_stats.wired_count--;
2496 pmap_unuse_pt(pmap, pv->pv_va, &free);
2497 pmap_invalidate_page(pmap, pv->pv_va);
2499 free_pv_entry(pmap, pv);
2500 PMAP_UNLOCK(pmap);
2516 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2523 CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x",
2524 pmap, sva, eva, prot);
2527 pmap_remove(pmap, sva, eva);
2544 PMAP_LOCK(pmap);
2554 ptpaddr = pmap->pm_pdir[pdirindex];
2568 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
2571 pmap->pm_pdir[pdirindex] |= pg_nx;
2580 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2614 pmap_invalidate_page(pmap, sva);
2624 pmap_invalidate_all(pmap);
2627 PMAP_UNLOCK(pmap);
2643 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2655 "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
2656 pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
2669 PMAP_LOCK(pmap);
2677 mpte = pmap_allocpte(pmap, va, flags);
2683 PMAP_UNLOCK(pmap);
2688 pde = pmap_pde(pmap, va);
2691 pte = pmap_pte_quick(pmap, va);
2698 (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va);
2725 pmap->pm_stats.wired_count++;
2727 pmap->pm_stats.wired_count--;
2750 pmap->pm_stats.wired_count--;
2753 pv = pmap_pvh_remove(&om->md, pmap, va);
2764 pmap->pm_stats.resident_count++;
2773 pv = get_pv_entry(pmap, FALSE);
2778 free_pv_entry(pmap, pv);
2784 pmap->pm_stats.wired_count++;
2804 if (pmap == kernel_pmap)
2838 pmap_invalidate_page(pmap, va);
2850 PMAP_UNLOCK(pmap);
2867 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2882 PMAP_LOCK(pmap);
2884 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
2899 PMAP_UNLOCK(pmap);
2904 * 1. Current pmap & pmap exists.
2912 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2918 CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
2919 pmap, va, m, prot);
2922 PMAP_LOCK(pmap);
2923 (void)pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
2927 PMAP_UNLOCK(pmap);
2932 pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count)
2938 PMAP_LOCK(pmap);
2940 if (!pmap_is_prefaultable_locked(pmap, *addrs))
2943 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL);
2956 PMAP_UNLOCK(pmap);
2961 pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m,
2973 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2993 ptema = pmap->pm_pdir[ptepindex];
3005 mpte = _pmap_allocpte(pmap, ptepindex,
3017 * entering the page into the current pmap. In order to support
3018 * quick entry into any pmap, one would likely use pmap_pte_quick.
3021 KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap"));
3035 !pmap_try_insert_pv_entry(pmap, va, m)) {
3038 if (pmap_unwire_ptp(pmap, mpte, &free)) {
3039 pmap_invalidate_page(pmap, va);
3051 pmap->pm_stats.resident_count++;
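
The mcl/count parameters threaded through pmap_enter_object(), pmap_enter_quick(), and pmap_enter_quick_locked() batch page-table writes as Xen multicall entries, so a whole range of mappings costs one hypercall instead of one per page. A hedged sketch of the flush side (HYPERVISOR_multicall() is the standard Xen batching interface; the queue size and panic message here are illustrative):

	multicall_entry_t mcl[16];	/* queued deferred hypercalls */
	int count = 0;

	/* ... pmap_enter_quick_locked() appends one entry per PTE write ... */

	if (count != 0 && HYPERVISOR_multicall(mcl, count) != 0)
		panic("pmap: multicall flush failed");
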
3109 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3158 PMAP_LOCK(pmap);
3161 pde = pmap_pde(pmap, addr);
3165 pmap->pm_stats.resident_count += NBPDR /
3172 PMAP_UNLOCK(pmap);
3178 * addresses in the given pmap. Every valid mapping within that range
3186 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3192 CTR3(KTR_PMAP, "pmap_unwire: pmap=%p sva=0x%x eva=0x%x", pmap, sva,
3196 PMAP_LOCK(pmap);
3201 pde = pmap_pde(pmap, sva);
3209 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3217 pmap->pm_stats.wired_count--;
3225 PMAP_UNLOCK(pmap);
3330 ("no pmap copy expected: 0x%jx saw: 0x%jx",
3515 * Returns true if the pmap's pv is one of the first
3522 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3533 if (PV_PMAP(pv) == pmap) {
3556 pmap_t pmap;
3565 pmap = PV_PMAP(pv);
3566 PMAP_LOCK(pmap);
3567 pte = pmap_pte_quick(pmap, pv->pv_va);
3570 PMAP_UNLOCK(pmap);
3598 pmap_remove_pages(pmap_t pmap)
3609 CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap);
3611 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
3612 printf("warning: pmap_remove_pages called with non-current pmap\n");
3616 KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap"));
3617 PMAP_LOCK(pmap);
3619 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
3620 KASSERT(pc->pc_pmap == pmap, ("Wrong pmap %p %p", pmap,
3673 pmap_unuse_pt(pmap, pv->pv_va, &free);
3680 pmap->pm_stats.resident_count--;
3685 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3694 pmap_invalidate_all(pmap);
3696 PMAP_UNLOCK(pmap);
3711 pmap_t pmap;
3729 pmap = PV_PMAP(pv);
3730 PMAP_LOCK(pmap);
3731 pte = pmap_pte_quick(pmap, pv->pv_va);
3733 PMAP_UNLOCK(pmap);
3751 pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr)
3758 if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) {
3766 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3770 PMAP_LOCK(pmap);
3771 rv = pmap_is_prefaultable_locked(pmap, addr);
3772 PMAP_UNLOCK(pmap);
3781 pmap_t pmap;
3790 pmap = PV_PMAP(pv);
3791 PMAP_LOCK(pmap);
3792 pte = pmap_pte_quick(pmap, pv->pv_va);
3794 PMAP_UNLOCK(pmap);
3806 pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
3811 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
3821 pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len)
3826 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
3842 pmap_t pmap;
3859 pmap = PV_PMAP(pv);
3860 PMAP_LOCK(pmap);
3861 pte = pmap_pte_quick(pmap, pv->pv_va);
3878 pmap_invalidate_page(pmap, pv->pv_va);
3880 PMAP_UNLOCK(pmap);
3906 pmap_t pmap;
3920 pmap = PV_PMAP(pv);
3921 PMAP_LOCK(pmap);
3922 pte = pmap_pte_quick(pmap, pv->pv_va);
3925 pmap_invalidate_page(pmap, pv->pv_va);
3930 PMAP_UNLOCK(pmap);
3943 * given pmap. Depending on the advice, clear the referenced and/or
3947 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
3960 PMAP_LOCK(pmap);
3965 oldpde = pmap->pm_pdir[sva >> PDRSHIFT];
3970 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3992 pmap_invalidate_page(pmap, sva);
4001 pmap_invalidate_all(pmap);
4004 PMAP_UNLOCK(pmap);
4014 pmap_t pmap;
4033 pmap = PV_PMAP(pv);
4034 PMAP_LOCK(pmap);
4035 pte = pmap_pte_quick(pmap, pv->pv_va);
4043 pmap_invalidate_page(pmap, pv->pv_va);
4045 PMAP_UNLOCK(pmap);
4255 * perform the pmap work for mincore
4258 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4264 PMAP_LOCK(pmap);
4266 ptep = pmap_pte(pmap, addr);
4282 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
4286 PMAP_UNLOCK(pmap);
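
pmap_mincore() translates PTE bits into mincore(2) status flags, retrying via vm_page_pa_tryrelock() as shown above. A sketch of the bit translation (MINCORE_* constants are from sys/mman.h; the PG_PS superpage case is omitted):

	pt_entry_t pte;
	int val = 0;

	pte = (ptep != NULL) ? PT_GET(ptep) : 0;
	if ((pte & PG_V) != 0) {
		val = MINCORE_INCORE;
		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		if ((pte & PG_A) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
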
4293 pmap_t pmap, oldpmap;
4298 pmap = vmspace_pmap(td->td_proc->p_vmspace);
4303 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
4306 CPU_SET(cpuid, &pmap->pm_active);
4309 cr3 = vtophys(pmap->pm_pdpt);
4311 cr3 = vtophys(pmap->pm_pdir);
4319 PCPU_SET(curpmap, pmap);
4355 pmap_t pmap;
4367 LIST_FOREACH(pmap, &allpmaps, pm_list) {
4377 pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
4388 pmap_t pmap;
4396 LIST_FOREACH(pmap, &allpmaps, pm_list) {
4406 pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
4408 mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
4417 pmap_t pmap;
4430 pmap = vmspace_pmap(p->p_vmspace);
4436 pde = &pmap->pm_pdir[i];
4448 pte = pmap_pte(pmap, va);
4480 /* print address space of pmap*/
4509 pmap_t pmap;
4515 pmap = PV_PMAP(pv);
4516 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
4517 pads(pmap);