Lines Matching refs:pmap in /freebsd-13-stable/sys/riscv/riscv/

64  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
219 LIST_HEAD(pmaplist, pmap);
222 struct pmap kernel_pmap_store;
239 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
240 "VM/pmap parameters");
288 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
289 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
291 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
292 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
294 static bool pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va);
295 static bool pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2,
297 static int pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2,
299 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
301 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
303 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
306 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
309 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
349 pmap_l1(pmap_t pmap, vm_offset_t va)
352 return (&pmap->pm_l1[pmap_l1_index(va)]);
368 pmap_l2(pmap_t pmap, vm_offset_t va)
372 l1 = pmap_l1(pmap, va);
394 pmap_l3(pmap_t pmap, vm_offset_t va)
398 l2 = pmap_l2(pmap, va);
410 pmap_resident_count_inc(pmap_t pmap, int count)
413 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
414 pmap->pm_stats.resident_count += count;
418 pmap_resident_count_dec(pmap_t pmap, int count)
421 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
422 KASSERT(pmap->pm_stats.resident_count >= count,
423 ("pmap %p resident count underflow %ld %d", pmap,
424 pmap->pm_stats.resident_count, count));
425 pmap->pm_stats.resident_count -= count;
429 pmap_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
432 struct pmap *user_pmap;
436 if (pmap != kernel_pmap)
574 rw_init(&pvh_global_lock, "pmap pv global");
577 * Set the current CPU as active in the kernel pmap. Secondary cores
669 * Initialize the pmap module.
670 * Called by vm_init, to initialize any structures that the pmap
680 * Initialize the pv chunk and pmap list mutexes.
682 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
689 rw_init(&pv_list_locks[i], "pmap pv list");
719 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
724 mask = pmap->pm_active;
734 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
739 mask = pmap->pm_active;
754 pmap_invalidate_all(pmap_t pmap)
759 mask = pmap->pm_active;
776 * We inline these within pmap.c for speed.
779 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
786 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
797 pmap_invalidate_all(pmap_t pmap)
811 pmap_extract(pmap_t pmap, vm_offset_t va)
818 PMAP_LOCK(pmap);
823 l2p = pmap_l2(pmap, va);
839 PMAP_UNLOCK(pmap);
847 * with the given pmap and virtual address pair
851 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
858 PMAP_LOCK(pmap);
859 l3p = pmap_l3(pmap, va);
868 PMAP_UNLOCK(pmap);
1053 pmap_ps_enabled(pmap_t pmap __unused)
1080 * Inserts the specified page table page into the specified pmap's collection
1081 * of idle page table pages. Each of a pmap's page table pages is responsible
1082 * for mapping a distinct range of virtual addresses. The pmap's collection is
1088 pmap_insert_pt_page(pmap_t pmap, vm_page_t ml3, bool promoted)
1091 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1093 return (vm_radix_insert(&pmap->pm_root, ml3));
1098 * specified pmap's collection of idle page table pages, and returns it.
1103 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
1106 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1107 return (vm_radix_remove(&pmap->pm_root, pmap_l2_pindex(va)));
1117 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1122 _pmap_unwire_ptp(pmap, va, m, free);
1130 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1134 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1137 l1 = pmap_l1(pmap, va);
1139 pmap_distribute_l1(pmap, pmap_l1_index(va), 0);
1142 l2 = pmap_l2(pmap, va);
1145 pmap_resident_count_dec(pmap, 1);
1150 l1 = pmap_l1(pmap, va);
1153 pmap_unwire_ptp(pmap, va, pdpg, free);
1155 pmap_invalidate_page(pmap, va);
1171 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1180 return (pmap_unwire_ptp(pmap, va, mpte, free));
1184 pmap_pinit0(pmap_t pmap)
1187 PMAP_LOCK_INIT(pmap);
1188 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1189 pmap->pm_l1 = kernel_pmap->pm_l1;
1190 pmap->pm_satp = SATP_MODE_SV39 | (vtophys(pmap->pm_l1) >> PAGE_SHIFT);
1191 CPU_ZERO(&pmap->pm_active);
1192 pmap_activate_boot(pmap);
1196 pmap_pinit(pmap_t pmap)
1209 pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
1210 pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
1213 pagezero(pmap->pm_l1);
1215 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1217 CPU_ZERO(&pmap->pm_active);
1220 memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);
1224 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1227 vm_radix_init(&pmap->pm_root);
1244 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1251 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1260 PMAP_UNLOCK(pmap);
1264 PMAP_LOCK(pmap);
1287 l1 = &pmap->pm_l1[l1index];
1293 pmap_distribute_l1(pmap, l1index, entry);
1299 l1 = &pmap->pm_l1[l1index];
1302 if (_pmap_alloc_l3(pmap, NUL1E + l1index,
1324 pmap_resident_count_inc(pmap, 1);
1330 pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1337 l1 = pmap_l1(pmap, va);
1345 l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
1353 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1368 l2 = pmap_l2(pmap, va);
1383 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1396 * Called when a pmap initialized by pmap_pinit is being released.
1400 pmap_release(pmap_t pmap)
1404 KASSERT(pmap->pm_stats.resident_count == 0,
1405 ("pmap_release: pmap resident count %ld != 0",
1406 pmap->pm_stats.resident_count));
1407 KASSERT(CPU_EMPTY(&pmap->pm_active),
1408 ("releasing active pmap %p", pmap));
1411 LIST_REMOVE(pmap, pm_list);
1414 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1));
1570 * Returns NULL if PV entries were reclaimed from the specified pmap.
1587 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1593 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1605 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1606 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1607 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1611 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1636 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
1642 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1650 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1653 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1667 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1668 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1684 m = reclaim_pv_chunk(pmap, lockp);
1692 pc->pc_pmap = pmap;
1700 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1707 * Ensure that the number of spare PV entries in the specified pmap meets or
1713 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1722 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1729 * contrast, these chunks must be added to the pmap upon allocation.
1734 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1747 m = reclaim_pv_chunk(pmap, lockp);
1757 pc->pc_pmap = pmap;
1761 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1765 * The reclaim might have freed a chunk from the current pmap.
1780 * First find and then remove the pv entry for the specified pmap and virtual
1786 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1792 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1802 * First find and then destroy the pv entry for the specified pmap and virtual
1807 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1811 pv = pmap_pvh_remove(pvh, pmap, va);
1814 free_pv_entry(pmap, pv);
1822 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1828 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1830 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1846 pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1857 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1867 pv = pmap_pvh_remove(pvh, pmap, va);
1875 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1894 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1895 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1899 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1900 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1907 pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1922 pv = pmap_pvh_remove(&m->md, pmap, va);
1932 pmap_pvh_free(&m->md, pmap, va);
1943 pmap_pv_insert_l2(pmap_t pmap, vm_offset_t va, pd_entry_t l2e, u_int flags,
1950 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1952 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
1965 pmap_remove_kernel_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
1972 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
1973 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1975 ml3 = pmap_remove_pt_page(pmap, va);
2001 pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
2009 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2022 pmap_invalidate_range(pmap, sva, sva + L2_SIZE);
2024 pmap->pm_stats.wired_count -= L2_SIZE / PAGE_SIZE;
2025 pmap_resident_count_dec(pmap, L2_SIZE / PAGE_SIZE);
2029 pmap_pvh_free(pvh, pmap, sva);
2042 if (pmap == kernel_pmap) {
2043 pmap_remove_kernel_l2(pmap, l2, sva);
2045 ml3 = pmap_remove_pt_page(pmap, sva);
2049 pmap_resident_count_dec(pmap, 1);
2057 return (pmap_unuse_pt(pmap, sva, l1e, free));
2064 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2072 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2074 pmap_invalidate_page(pmap, va);
2076 pmap->pm_stats.wired_count -= 1;
2077 pmap_resident_count_dec(pmap, 1);
2086 pmap_pvh_free(&m->md, pmap, va);
2095 return (pmap_unuse_pt(pmap, va, l2e, free));
2105 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2116 if (pmap->pm_stats.resident_count == 0)
2122 PMAP_LOCK(pmap);
2126 if (pmap->pm_stats.resident_count == 0)
2129 l1 = pmap_l1(pmap, sva);
2151 (void)pmap_remove_l2(pmap, l2, sva,
2154 } else if (!pmap_demote_l2_locked(pmap, l2, sva,
2177 pmap_invalidate_range(pmap, va, sva);
2184 if (pmap_remove_l3(pmap, l3, sva, l2e, &free, &lock)) {
2190 pmap_invalidate_range(pmap, va, sva);
2195 PMAP_UNLOCK(pmap);
2217 pmap_t pmap;
2231 pmap = PV_PMAP(pv);
2232 PMAP_LOCK(pmap);
2234 l2 = pmap_l2(pmap, va);
2235 (void)pmap_demote_l2(pmap, l2, va);
2236 PMAP_UNLOCK(pmap);
2239 pmap = PV_PMAP(pv);
2240 PMAP_LOCK(pmap);
2241 pmap_resident_count_dec(pmap, 1);
2242 l2 = pmap_l2(pmap, pv->pv_va);
2251 pmap_invalidate_page(pmap, pv->pv_va);
2253 pmap->pm_stats.wired_count--;
2262 pmap_unuse_pt(pmap, pv->pv_va, pmap_load(l2), &free);
2265 free_pv_entry(pmap, pv);
2266 PMAP_UNLOCK(pmap);
2278 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2288 pmap_remove(pmap, sva, eva);
2304 PMAP_LOCK(pmap);
2306 l1 = pmap_l1(pmap, sva);
2342 pmap);
2343 PMAP_UNLOCK(pmap);
2348 if (!pmap_demote_l2(pmap, l2, sva)) {
2378 pmap_invalidate_all(pmap);
2381 PMAP_UNLOCK(pmap);
2385 pmap_fault(pmap_t pmap, vm_offset_t va, vm_prot_t ftype)
2392 PMAP_LOCK(pmap);
2393 l2 = pmap_l2(pmap, va);
2405 if ((pmap != kernel_pmap && (oldpte & PTE_U) == 0) ||
2425 PMAP_UNLOCK(pmap);
2430 pmap_demote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va)
2436 rv = pmap_demote_l2_locked(pmap, l2, va, &lock);
2447 pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2457 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2462 if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
2469 (void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
2470 pmap_load(pmap_l1(pmap, va)), &free, lockp);
2473 "failure for va %#lx in pmap %p", va, pmap);
2478 pmap_resident_count_inc(pmap, 1);
2519 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
2530 pmap_pv_demote_l2(pmap, va, PTE_TO_PHYS(oldl2), lockp);
2533 CTR2(KTR_PMAP, "pmap_demote_l2_locked: success for va %#lx in pmap %p",
2534 va, pmap);
2540 pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2547 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2557 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
2558 va, pmap);
2569 * fault. In the latter case, the pmap lock provides atomicity. Thus,
2585 "pmap_promote_l2: failure for va %#lx pmap %p",
2586 va, pmap);
2598 "pmap_promote_l2: failure for va %#lx pmap %p",
2599 va, pmap);
2609 if (pmap_insert_pt_page(pmap, ml3, true)) {
2610 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
2611 va, pmap);
2617 pmap_pv_promote_l2(pmap, va, PTE_TO_PHYS(firstl3e), lockp);
2622 CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
2623 pmap);
2640 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2691 PMAP_LOCK(pmap);
2697 rv = pmap_enter_l2(pmap, va, new_l3, flags, m, &lock);
2701 l2 = pmap_l2(pmap, va);
2703 ((l2e & PTE_RWX) == 0 || pmap_demote_l2_locked(pmap, l2,
2712 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2718 PMAP_UNLOCK(pmap);
2721 l3 = pmap_l3(pmap, va);
2723 l3 = pmap_l3(pmap, va);
2738 l1 = pmap_l1(pmap, va);
2742 pmap_distribute_l1(pmap, pmap_l1_index(va), entry);
2760 pmap_invalidate_page(pmap, va);
2779 pmap->pm_stats.wired_count++;
2782 pmap->pm_stats.wired_count--;
2810 * pmap keep a consistent view of the mapping, which is
2824 * The pmap lock is sufficient to synchronize with
2833 pv = pmap_pvh_remove(&om->md, pmap, va);
2837 free_pv_entry(pmap, pv);
2844 pmap_invalidate_page(pmap, va);
2851 pmap->pm_stats.wired_count++;
2852 pmap_resident_count_inc(pmap, 1);
2859 pv = get_pv_entry(pmap, &lock);
2875 pmap_sync_icache(pmap, va, PAGE_SIZE);
2882 pmap_invalidate_page(pmap, va);
2894 pmap_ps_enabled(pmap) &&
2897 pmap_promote_l2(pmap, l2, va, &lock);
2905 PMAP_UNLOCK(pmap);
2917 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2923 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2933 return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
2950 pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
2958 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2960 if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
2962 CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
2963 va, pmap);
2975 "pmap_enter_l2: failure for va %#lx in pmap %p",
2976 va, pmap);
2981 (void)pmap_remove_l2(pmap, l2, va,
2982 pmap_load(pmap_l1(pmap, va)), &free, lockp);
2987 pmap_remove_l3(pmap, l3, sva, oldl2, &free,
2998 if (pmap_insert_pt_page(pmap, mt, false))
3009 if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
3011 if (pmap_unwire_ptp(pmap, va, l2pg, &free)) {
3018 pmap_invalidate_page(pmap, va);
3022 "pmap_enter_l2: failure for va %#lx in pmap %p",
3023 va, pmap);
3035 pmap->pm_stats.wired_count += L2_SIZE / PAGE_SIZE;
3036 pmap->pm_stats.resident_count += L2_SIZE / PAGE_SIZE;
3044 CTR2(KTR_PMAP, "pmap_enter_l2: success for va %#lx in pmap %p",
3045 va, pmap);
3063 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3078 PMAP_LOCK(pmap);
3082 m->psind == 1 && pmap_ps_enabled(pmap) &&
3083 pmap_enter_2mpage(pmap, va, m, prot, &lock))
3086 mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte,
3093 PMAP_UNLOCK(pmap);
3098 * 1. Current pmap & pmap exists.
3106 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3112 PMAP_LOCK(pmap);
3113 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3117 PMAP_UNLOCK(pmap);
3121 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3133 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3135 CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3153 l2 = pmap_l2(pmap, va);
3170 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3195 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3198 if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3199 pmap_invalidate_page(pmap, va);
3210 pmap_resident_count_inc(pmap, 1);
3226 pmap_sync_icache(pmap, va, PAGE_SIZE);
3230 pmap_invalidate_page(pmap, va);
3240 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3251 * addresses in the given pmap. Every valid mapping within that range
3259 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3268 PMAP_LOCK(pmap);
3270 l1 = pmap_l1(pmap, sva);
3296 PMAP_UNLOCK(pmap);
3302 if (!pmap_demote_l2(pmap, l2, sva))
3318 * PG_W must be cleared atomically. Although the pmap
3323 pmap->pm_stats.wired_count--;
3328 PMAP_UNLOCK(pmap);
3441 * Returns true if the pmap's pv is one of the first
3448 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3463 if (PV_PMAP(pv) == pmap) {
3474 if (PV_PMAP(pv) == pmap) {
3499 pmap_t pmap;
3513 pmap = PV_PMAP(pv);
3514 if (!PMAP_TRYLOCK(pmap)) {
3517 PMAP_LOCK(pmap);
3520 PMAP_UNLOCK(pmap);
3524 l2 = pmap_l2(pmap, pv->pv_va);
3530 PMAP_UNLOCK(pmap);
3535 pmap = PV_PMAP(pv);
3536 if (!PMAP_TRYLOCK(pmap)) {
3540 PMAP_LOCK(pmap);
3544 PMAP_UNLOCK(pmap);
3548 l2 = pmap_l2(pmap, pv->pv_va);
3551 PMAP_UNLOCK(pmap);
3581 pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
3588 pmap_resident_count_dec(pmap, Ln_ENTRIES);
3598 mpte = pmap_remove_pt_page(pmap, pv->pv_va);
3602 pmap_resident_count_dec(pmap, 1);
3609 pmap_resident_count_dec(pmap, 1);
3623 * pmap. This pmap cannot be active on any processor besides the
3626 * This function cannot be applied to the kernel pmap. Moreover, it
3630 * destroy mappings by iterating over the pmap's collection of PV
3638 pmap_remove_pages(pmap_t pmap)
3656 PMAP_LOCK(pmap);
3657 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
3669 pte = pmap_l1(pmap, pv->pv_va);
3717 pmap_remove_pages_pv(pmap, m, pv, &free,
3719 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
3727 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3733 pmap_invalidate_all(pmap);
3735 PMAP_UNLOCK(pmap);
3747 pmap_t pmap;
3763 pmap = PV_PMAP(pv);
3764 if (!PMAP_TRYLOCK(pmap)) {
3767 PMAP_LOCK(pmap);
3770 PMAP_UNLOCK(pmap);
3774 l2 = pmap_l2(pmap, pv->pv_va);
3779 PMAP_UNLOCK(pmap);
3786 pmap = PV_PMAP(pv);
3787 if (!PMAP_TRYLOCK(pmap)) {
3791 PMAP_LOCK(pmap);
3795 PMAP_UNLOCK(pmap);
3799 l2 = pmap_l2(pmap, pv->pv_va);
3801 PMAP_UNLOCK(pmap);
3840 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3846 PMAP_LOCK(pmap);
3847 l3 = pmap_l3(pmap, addr);
3851 PMAP_UNLOCK(pmap);
3878 pmap_t pmap;
3898 pmap = PV_PMAP(pv);
3899 if (!PMAP_TRYLOCK(pmap)) {
3902 PMAP_LOCK(pmap);
3905 PMAP_UNLOCK(pmap);
3911 l2 = pmap_l2(pmap, va);
3913 (void)pmap_demote_l2_locked(pmap, l2, va, &lock);
3917 PMAP_UNLOCK(pmap);
3920 pmap = PV_PMAP(pv);
3921 if (!PMAP_TRYLOCK(pmap)) {
3925 PMAP_LOCK(pmap);
3928 PMAP_UNLOCK(pmap);
3933 l2 = pmap_l2(pmap, pv->pv_va);
3945 pmap_invalidate_page(pmap, pv->pv_va);
3947 PMAP_UNLOCK(pmap);
3977 pmap_t pmap;
4000 pmap = PV_PMAP(pv);
4001 if (!PMAP_TRYLOCK(pmap)) {
4004 PMAP_LOCK(pmap);
4007 PMAP_UNLOCK(pmap);
4012 l2 = pmap_l2(pmap, va);
4028 * and the pmap address to select one 4KB page out of
4042 (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
4045 pmap_invalidate_page(pmap, va);
4050 PMAP_UNLOCK(pmap);
4065 pmap = PV_PMAP(pv);
4066 if (!PMAP_TRYLOCK(pmap)) {
4070 PMAP_LOCK(pmap);
4073 PMAP_UNLOCK(pmap);
4077 l2 = pmap_l2(pmap, pv->pv_va);
4095 pmap_invalidate_page(pmap, pv->pv_va);
4100 PMAP_UNLOCK(pmap);
4118 * given pmap. Depending on the advice, clear the referenced and/or
4122 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4134 pmap_t pmap;
4162 pmap = PV_PMAP(pv);
4163 if (!PMAP_TRYLOCK(pmap)) {
4166 PMAP_LOCK(pmap);
4169 PMAP_UNLOCK(pmap);
4174 l2 = pmap_l2(pmap, va);
4178 pmap_demote_l2_locked(pmap, l2, va, &lock) &&
4188 pmap_invalidate_page(pmap, va);
4190 PMAP_UNLOCK(pmap);
4193 pmap = PV_PMAP(pv);
4194 if (!PMAP_TRYLOCK(pmap)) {
4198 PMAP_LOCK(pmap);
4201 PMAP_UNLOCK(pmap);
4205 l2 = pmap_l2(pmap, pv->pv_va);
4211 pmap_invalidate_page(pmap, pv->pv_va);
4213 PMAP_UNLOCK(pmap);
4242 * Perform the pmap work for mincore(2). If the page is not both referenced and
4243 * modified by this pmap, returns its physical address so that the caller can
4247 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
4254 PMAP_LOCK(pmap);
4255 l2 = pmap_l2(pmap, addr);
4264 PMAP_UNLOCK(pmap);
4284 PMAP_UNLOCK(pmap);
4291 pmap_t oldpmap, pmap;
4295 pmap = vmspace_pmap(td->td_proc->p_vmspace);
4296 if (pmap == oldpmap)
4298 load_satp(pmap->pm_satp);
4302 CPU_SET_ATOMIC(hart, &pmap->pm_active);
4305 CPU_SET(hart, &pmap->pm_active);
4308 PCPU_SET(curpmap, pmap);
4323 pmap_activate_boot(pmap_t pmap)
4329 CPU_SET_ATOMIC(hart, &pmap->pm_active);
4331 CPU_SET(hart, &pmap->pm_active);
4333 PCPU_SET(curpmap, pmap);
4337 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
4466 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
4473 pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l1, pd_entry_t **l2,
4479 l1p = pmap_l1(pmap, va);
4611 * Iterate over the kernel page tables without holding the kernel pmap
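For orientation only: the excerpts above around lines 349-398 (pmap_l1(), pmap_l2(), pmap_l3()) perform the three-level Sv39 page-table walk that most of the other matches build on. The following is a minimal sketch of such a walk, not the FreeBSD code itself; it assumes the standard RISC-V Sv39 PTE layout and uses a hypothetical phys_to_virt() helper in place of FreeBSD's PHYS_TO_DMAP().

/*
 * Illustrative sketch of a three-level Sv39 walk (hypothetical helpers,
 * standard Sv39 constants); not the pmap.c implementation.
 */
#include <stdint.h>
#include <stddef.h>

#define PTE_V		0x001ULL	/* entry is valid */
#define PTE_RWX		0x00eULL	/* R|W|X: leaf if any are set */
#define PTE_PPN_SHIFT	10		/* physical page number field */
#define PAGE_SHIFT	12
#define LN_ENTRIES	512		/* entries per level in Sv39 */

extern void *phys_to_virt(uint64_t pa);	/* hypothetical direct-map lookup */

static inline size_t l1_index(uint64_t va) { return (va >> 30) & (LN_ENTRIES - 1); }
static inline size_t l2_index(uint64_t va) { return (va >> 21) & (LN_ENTRIES - 1); }
static inline size_t l3_index(uint64_t va) { return (va >> 12) & (LN_ENTRIES - 1); }

/* Follow a non-leaf PTE to the next-level table, or return NULL if invalid/leaf. */
static uint64_t *
next_level(uint64_t pte, size_t index)
{
	uint64_t *table;

	if ((pte & PTE_V) == 0 || (pte & PTE_RWX) != 0)
		return (NULL);
	table = phys_to_virt((pte >> PTE_PPN_SHIFT) << PAGE_SHIFT);
	return (&table[index]);
}

/* Return a pointer to the L3 (4 KiB) PTE for va, or NULL if unmapped. */
uint64_t *
walk_l3(uint64_t *l1_table, uint64_t va)
{
	uint64_t *l2e;

	l2e = next_level(l1_table[l1_index(va)], l2_index(va));
	if (l2e == NULL)
		return (NULL);
	return (next_level(*l2e, l3_index(va)));
}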