Lines matching refs:pmap (references to the pmap symbol) in sys/riscv/riscv/pmap.c; each entry gives the source line number followed by the matching line.

62  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
97 __FBSDID("$FreeBSD: stable/11/sys/riscv/riscv/pmap.c 350276 2019-07-24 11:34:09Z kp $");
210 LIST_HEAD(pmaplist, pmap);
213 static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
215 struct pmap kernel_pmap_store;
233 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
234 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
236 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
237 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
239 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
241 static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
243 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
246 static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
249 static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
288 pmap_l1(pmap_t pmap, vm_offset_t va)
291 return (&pmap->pm_l1[pmap_l1_index(va)]);
307 pmap_l2(pmap_t pmap, vm_offset_t va)
311 l1 = pmap_l1(pmap, va);
335 pmap_l3(pmap_t pmap, vm_offset_t va)
339 l2 = pmap_l2(pmap, va);
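
The three walkers above (source lines 288-339) compose: pmap_l2() starts from the L1 entry returned by pmap_l1(), and pmap_l3() from the L2 entry returned by pmap_l2(), each step descending through the direct map to the next-level table. A minimal sketch of one such descent, assuming the pmap_load(), PTE_TO_PHYS(), PHYS_TO_DMAP() and pmap_l2_index() helpers from the FreeBSD RISC-V headers; it illustrates the shape of the walk, not the verbatim source:

    /* Sketch: descend from an L1 entry to the matching L2 entry. */
    static __inline pd_entry_t *
    sketch_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
    {
        pd_entry_t *l2;

        /* A valid L1 entry holds the physical address of the L2 table. */
        l2 = (pd_entry_t *)PHYS_TO_DMAP(PTE_TO_PHYS(pmap_load(l1)));
        return (&l2[pmap_l2_index(va)]);
    }

    /* Sketch: guard against a missing L1 entry before descending. */
    static __inline pd_entry_t *
    sketch_l2(pmap_t pmap, vm_offset_t va)
    {
        pd_entry_t *l1;

        l1 = pmap_l1(pmap, va);
        if (l1 == NULL || (pmap_load(l1) & PTE_V) == 0)
            return (NULL);    /* no L2 table mapped at this address */
        return (sketch_l1_to_l2(l1, va));
    }
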
362 pmap_is_current(pmap_t pmap)
365 return ((pmap == pmap_kernel()) ||
366 (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
396 pmap_resident_count_inc(pmap_t pmap, int count)
399 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
400 pmap->pm_stats.resident_count += count;
404 pmap_resident_count_dec(pmap_t pmap, int count)
407 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
408 KASSERT(pmap->pm_stats.resident_count >= count,
409 ("pmap %p resident count underflow %ld %d", pmap,
410 pmap->pm_stats.resident_count, count));
411 pmap->pm_stats.resident_count -= count;
415 pmap_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
418 struct pmap *user_pmap;
422 if (pmap != kernel_pmap)
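
pmap_distribute_l1() (source lines 415-422 above) keeps the copied kernel L1 entries coherent: every user pmap starts with a copy of the kernel's L1 page (see the memcpy in pmap_pinit() at source line 1198 below), so any L1 entry the kernel pmap later installs must be propagated to every pmap on the allpmaps list. A hedged sketch of that propagation, assuming the pmap_load_store()/pmap_load_clear() accessors used elsewhere in the FreeBSD RISC-V pmap; an illustration, not the verbatim source:

    /* Sketch: push a new kernel L1 entry into every user pmap's copy. */
    static void
    sketch_distribute_l1(struct pmap *pmap, vm_pindex_t l1index,
        pt_entry_t entry)
    {
        struct pmap *user_pmap;
        pd_entry_t *l1;

        /* Only changes to the kernel pmap's L1 need to be distributed. */
        if (pmap != kernel_pmap)
            return;

        LIST_FOREACH(user_pmap, &allpmaps, pm_list) {
            l1 = &user_pmap->pm_l1[l1index];
            if (entry != 0)
                pmap_load_store(l1, entry);    /* install the new entry */
            else
                pmap_load_clear(l1);           /* or remove a stale one */
        }
    }
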
567 rw_init(&pvh_global_lock, "pmap pv global");
720 * Initialize the pmap module.
721 * Called by vm_init, to initialize any structures that the pmap
732 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
738 rw_init(&pv_list_locks[i], "pmap pv list");
743 * We inline these within pmap.c for speed.
746 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
757 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
768 pmap_invalidate_all(pmap_t pmap)
785 pmap_extract(pmap_t pmap, vm_offset_t va)
792 PMAP_LOCK(pmap);
797 l2p = pmap_l2(pmap, va);
813 PMAP_UNLOCK(pmap);
821 * with the given pmap and virtual address pair
825 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
834 PMAP_LOCK(pmap);
836 l3p = pmap_l3(pmap, va);
840 if (vm_page_pa_tryrelock(pmap, phys, &pa))
847 PMAP_UNLOCK(pmap);
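
pmap_extract() (source lines 785-813 above) resolves a virtual address under the pmap lock without faulting: it walks only as far as needed, taking the physical address either from a 2 MiB L2 superpage mapping or from the final L3 entry. A hedged sketch of that walk, assuming pmap_l2_to_l3(), PTE_TO_PHYS(), the PTE_RX read/execute mask and the L2_OFFSET/L3_OFFSET masks from the FreeBSD RISC-V headers; illustrative only:

    /* Sketch: translate va to a physical address, or 0 if unmapped. */
    vm_paddr_t
    sketch_extract(pmap_t pmap, vm_offset_t va)
    {
        pd_entry_t *l2p, l2;
        pt_entry_t *l3p;
        vm_paddr_t pa = 0;

        PMAP_LOCK(pmap);
        l2p = pmap_l2(pmap, va);
        if (l2p != NULL) {
            l2 = pmap_load(l2p);
            if ((l2 & PTE_RX) != 0) {
                /* Readable/executable L2 entry: a superpage mapping. */
                pa = PTE_TO_PHYS(l2) | (va & L2_OFFSET);
            } else {
                /* Otherwise descend to the L3 table for the 4 KiB page. */
                l3p = pmap_l2_to_l3(l2p, va);
                if (l3p != NULL)
                    pa = PTE_TO_PHYS(pmap_load(l3p)) | (va & L3_OFFSET);
            }
        }
        PMAP_UNLOCK(pmap);
        return (pa);
    }
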
1084 pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1089 _pmap_unwire_l3(pmap, va, m, free);
1097 _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1101 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1108 l1 = pmap_l1(pmap, va);
1110 pmap_distribute_l1(pmap, pmap_l1_index(va), 0);
1115 l2 = pmap_l2(pmap, va);
1119 pmap_resident_count_dec(pmap, 1);
1125 l1 = pmap_l1(pmap, va);
1128 pmap_unwire_l3(pmap, va, pdpg, free);
1130 pmap_invalidate_page(pmap, va);
1151 pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1164 return (pmap_unwire_l3(pmap, va, mpte, free));
1168 pmap_pinit0(pmap_t pmap)
1171 PMAP_LOCK_INIT(pmap);
1172 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1173 pmap->pm_l1 = kernel_pmap->pm_l1;
1177 pmap_pinit(pmap_t pmap)
1190 pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
1193 pagezero(pmap->pm_l1);
1195 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1198 memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);
1201 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1218 _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1225 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1234 PMAP_UNLOCK(pmap);
1238 PMAP_LOCK(pmap);
1261 l1 = &pmap->pm_l1[l1index];
1267 pmap_distribute_l1(pmap, l1index, entry);
1276 l1 = &pmap->pm_l1[l1index];
1279 if (_pmap_alloc_l3(pmap, NUPDE + l1index,
1304 pmap_resident_count_inc(pmap, 1);
1310 pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1325 l2 = pmap_l2(pmap, va);
1340 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1354 * Called when a pmap initialized by pmap_pinit is being released.
1358 pmap_release(pmap_t pmap)
1362 KASSERT(pmap->pm_stats.resident_count == 0,
1363 ("pmap_release: pmap resident count %ld != 0",
1364 pmap->pm_stats.resident_count));
1366 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1));
1371 /* Remove pmap from the allpmaps list */
1372 LIST_REMOVE(pmap, pm_list);
1375 bzero(pmap->pm_l1, PAGE_SIZE);
1531 * Returns NULL if PV entries were reclaimed from the specified pmap.
1548 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1554 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1566 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1567 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1568 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1572 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1599 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
1605 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1613 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1616 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1630 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1631 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1647 m = reclaim_pv_chunk(pmap, lockp);
1657 pc->pc_pmap = pmap;
1665 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
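
get_pv_entry() (source lines 1605-1665 above) hands out PV entries from per-pmap chunks rather than hitting the allocator for every mapping: a chunk is a page holding an array of pv_entry structures plus a bitmap of free slots, and the chunk at the head of pm_pvchunk is tried first. A hedged sketch of the per-chunk allocation step, assuming the _NPCM/pc_map[]/pc_pventry[] chunk layout that FreeBSD pmaps share; the field names are assumptions and the body is illustrative:

    /* Sketch: claim one free slot from a pv chunk's free bitmap. */
    static pv_entry_t
    sketch_pv_from_chunk(struct pv_chunk *pc)
    {
        uint64_t freebits;
        int bit, field;

        for (field = 0; field < _NPCM; field++) {
            freebits = pc->pc_map[field];    /* set bits mark free slots */
            if (freebits == 0)
                continue;
            bit = ffsl(freebits) - 1;
            pc->pc_map[field] &= ~(1UL << bit);    /* mark the slot used */
            return (&pc->pc_pventry[field * 64 + bit]);
        }
        return (NULL);    /* chunk exhausted; caller allocates a new one */
    }
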
1672 * First find and then remove the pv entry for the specified pmap and virtual
1678 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1684 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1694 * First find and then destroy the pv entry for the specified pmap and virtual
1699 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1703 pv = pmap_pvh_remove(pvh, pmap, va);
1706 free_pv_entry(pmap, pv);
1714 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1720 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1722 if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1736 pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
1743 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1744 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
1748 pmap_invalidate_page(pmap, va);
1750 pmap->pm_stats.wired_count -= 1;
1751 pmap_resident_count_dec(pmap, 1);
1760 pmap_pvh_free(&m->md, pmap, va);
1763 return (pmap_unuse_l3(pmap, va, l2e, free));
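
pmap_remove_l3() (source lines 1736-1763 above) is the common path for tearing down one 4 KiB mapping: clear the L3 entry, invalidate the stale translation, adjust the wired and resident counters, hand any dirty state back to the vm_page, free the PV entry, and drop a reference on the page-table page through pmap_unuse_l3(). A hedged sketch of that sequence; PTE_SW_WIRED, PTE_SW_MANAGED, PTE_D and pmap_load_clear() are assumed names from the FreeBSD RISC-V PTE definitions, and the body (which omits the lock-pointer argument of the real function) is illustrative, not the verbatim source:

    /* Sketch: remove a single L3 (4 KiB) mapping at va. */
    static int
    sketch_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
        pd_entry_t l2e, struct spglist *free)
    {
        pt_entry_t old_l3;
        vm_page_t m;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        old_l3 = pmap_load_clear(l3);          /* atomically clear the PTE */
        pmap_invalidate_page(pmap, va);        /* flush the stale translation */
        if ((old_l3 & PTE_SW_WIRED) != 0)
            pmap->pm_stats.wired_count -= 1;
        pmap_resident_count_dec(pmap, 1);
        if ((old_l3 & PTE_SW_MANAGED) != 0) {
            m = PHYS_TO_VM_PAGE(PTE_TO_PHYS(old_l3));
            if ((old_l3 & PTE_D) != 0)
                vm_page_dirty(m);              /* preserve the dirty state */
            pmap_pvh_free(&m->md, pmap, va);   /* drop the PV entry */
        }
        /* Unwire the page-table page; it may be queued on "free". */
        return (pmap_unuse_l3(pmap, va, l2e, free));
    }
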
1773 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1785 if (pmap->pm_stats.resident_count == 0)
1792 PMAP_LOCK(pmap);
1796 if (pmap->pm_stats.resident_count == 0)
1799 l1 = pmap_l1(pmap, sva);
1843 pmap_invalidate_range(pmap, va, sva);
1850 if (pmap_remove_l3(pmap, l3, sva, l3_pte, &free,
1857 pmap_invalidate_range(pmap, va, sva);
1862 pmap_invalidate_all(pmap);
1864 PMAP_UNLOCK(pmap);
1885 pmap_t pmap;
1895 pmap = PV_PMAP(pv);
1896 PMAP_LOCK(pmap);
1897 pmap_resident_count_dec(pmap, 1);
1898 l2 = pmap_l2(pmap, pv->pv_va);
1907 if (pmap_is_current(pmap) &&
1912 pmap_invalidate_page(pmap, pv->pv_va);
1914 pmap->pm_stats.wired_count--;
1923 pmap_unuse_l3(pmap, pv->pv_va, pmap_load(l2), &free);
1926 free_pv_entry(pmap, pv);
1927 PMAP_UNLOCK(pmap);
1939 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1947 pmap_remove(pmap, sva, eva);
1954 PMAP_LOCK(pmap);
1957 l1 = pmap_l1(pmap, sva);
1988 pmap_invalidate_page(pmap, va);
1992 PMAP_UNLOCK(pmap);
1995 pmap_invalidate_all(pmap);
2011 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2057 PMAP_LOCK(pmap);
2061 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2067 PMAP_UNLOCK(pmap);
2070 l3 = pmap_l3(pmap, va);
2072 l3 = pmap_l3(pmap, va);
2075 l2 = pmap_l2(pmap, va);
2088 l1 = pmap_l1(pmap, va);
2092 pmap_distribute_l1(pmap, pmap_l1_index(va), entry);
2116 pmap_invalidate_page(pmap, va);
2135 pmap->pm_stats.wired_count++;
2138 pmap->pm_stats.wired_count--;
2166 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
2173 pmap->pm_stats.wired_count++;
2174 pmap_resident_count_inc(pmap, 1);
2181 pv = get_pv_entry(pmap, &lock);
2207 pmap_pvh_free(&om->md, pmap, va);
2217 pmap_invalidate_page(pmap, va);
2218 if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
2224 PMAP_UNLOCK(pmap);
2241 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2256 PMAP_LOCK(pmap);
2259 mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock);
2265 PMAP_UNLOCK(pmap);
2270 * 1. Current pmap & pmap exists.
2278 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2284 PMAP_LOCK(pmap);
2285 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
2289 PMAP_UNLOCK(pmap);
2293 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2308 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2310 CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
2328 l2 = pmap_l2(pmap, va);
2345 mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
2370 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
2373 if (pmap_unwire_l3(pmap, va, mpte, &free)) {
2374 pmap_invalidate_page(pmap, va);
2385 pmap_resident_count_inc(pmap, 1);
2402 pmap_invalidate_page(pmap, va);
2412 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
2423 * addresses in the given pmap. Every valid mapping within that range
2431 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2439 PMAP_LOCK(pmap);
2441 l1 = pmap_l1(pmap, sva);
2468 * PG_W must be cleared atomically. Although the pmap
2473 pmap->pm_stats.wired_count--;
2478 PMAP_UNLOCK(pmap);
2605 * Returns true if the pmap's pv is one of the first
2612 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2626 if (PV_PMAP(pv) == pmap) {
2649 pmap_t pmap;
2662 pmap = PV_PMAP(pv);
2663 if (!PMAP_TRYLOCK(pmap)) {
2666 PMAP_LOCK(pmap);
2669 PMAP_UNLOCK(pmap);
2673 l3 = pmap_l3(pmap, pv->pv_va);
2676 PMAP_UNLOCK(pmap);
2685 * pmap. This pmap cannot be active on any processor besides the
2688 * This function cannot be applied to the kernel pmap. Moreover, it
2692 * destroy mappings by iterating over the pmap's collection of PV
2700 pmap_remove_pages(pmap_t pmap)
2718 PMAP_LOCK(pmap);
2719 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2731 l2 = pmap_l2(pmap, pv->pv_va);
2756 if (pmap_is_current(pmap) &&
2761 pmap_invalidate_page(pmap, pv->pv_va);
2774 pmap_resident_count_dec(pmap, 1);
2778 pmap_unuse_l3(pmap, pv->pv_va, ptepde, &free);
2786 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2790 pmap_invalidate_all(pmap);
2794 PMAP_UNLOCK(pmap);
2809 pmap_t pmap;
2819 pmap = PV_PMAP(pv);
2820 if (!PMAP_TRYLOCK(pmap)) {
2823 PMAP_LOCK(pmap);
2826 PMAP_UNLOCK(pmap);
2830 l3 = pmap_l3(pmap, pv->pv_va);
2854 PMAP_UNLOCK(pmap);
2895 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2901 PMAP_LOCK(pmap);
2902 l3 = pmap_l3(pmap, addr);
2906 PMAP_UNLOCK(pmap);
2931 pmap_t pmap;
2954 pmap = PV_PMAP(pv);
2955 if (!PMAP_TRYLOCK(pmap)) {
2958 PMAP_LOCK(pmap);
2961 PMAP_UNLOCK(pmap);
2966 l3 = pmap_l3(pmap, pv->pv_va);
2977 pmap_invalidate_page(pmap, pv->pv_va);
2979 PMAP_UNLOCK(pmap);
2987 safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
3013 pmap_t pmap;
3037 pmap = PV_PMAP(pv);
3038 if (!PMAP_TRYLOCK(pmap)) {
3041 PMAP_LOCK(pmap);
3044 PMAP_UNLOCK(pmap);
3048 l2 = pmap_l2(pmap, pv->pv_va);
3058 if (safe_to_clear_referenced(pmap, old_l3)) {
3072 pmap_remove_l3(pmap, l3, pv->pv_va,
3074 pmap_invalidate_page(pmap, pv->pv_va);
3085 PMAP_UNLOCK(pmap);
3103 * given pmap. Depending on the advice, clear the referenced and/or
3107 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
3157 * RISCVTODO: Implement the below (from the amd64 pmap)
3168 * perform the pmap work for mincore
3171 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
3180 pmap_t pmap;
3183 pmap = vmspace_pmap(td->td_proc->p_vmspace);
3184 td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
3188 pmap_invalidate_all(pmap);
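
The final matches (source lines 3180-3188) come from pmap_activate(): on a context switch the physical address of the incoming pmap's L1 table is stored in the PCB so the switch code can program the hardware page-table base register, after which stale translations are flushed. A hedged sketch, assuming critical_enter()/critical_exit() bracketing; illustrative, not the verbatim source:

    /* Sketch: make td's address space the active one on this CPU. */
    void
    sketch_activate(struct thread *td)
    {
        pmap_t pmap;

        critical_enter();
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
        /* Consumed by the switch code when it loads the page-table base. */
        td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
        pmap_invalidate_all(pmap);    /* drop translations of the old pmap */
        critical_exit();
    }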