Lines Matching defs:pvo

203  * Lock for the pteg and pvo tables.
217 struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */
219 uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
220 uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
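
The declarations above show that each pvo entry lives on two lists at once: an overflow chain hashed by PTEG index (pvo_olink, anchored in moea64_pvo_table) and a per-physical-page chain (pvo_vlink). The following userland sketch, using FreeBSD's <sys/queue.h>, illustrates that dual-linkage idea only; struct toy_pvo and its field names are invented for illustration, not the kernel structures.

/*
 * Illustrative sketch: one entry reachable from a PTEG bucket and from a
 * per-page chain at the same time.  All names here are assumptions.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_pvo {
	LIST_ENTRY(toy_pvo) olink;	/* chains entries in one PTEG bucket */
	LIST_ENTRY(toy_pvo) vlink;	/* chains mappings of one physical page */
	unsigned long vaddr;
	unsigned long pa;
};

LIST_HEAD(toy_pvo_head, toy_pvo);

int
main(void)
{
	struct toy_pvo_head pteg_bucket = LIST_HEAD_INITIALIZER(pteg_bucket);
	struct toy_pvo_head page_chain = LIST_HEAD_INITIALIZER(page_chain);
	struct toy_pvo *p = malloc(sizeof(*p));

	p->vaddr = 0x2000;
	p->pa = 0x100000;
	/* The same entry is inserted on both heads, via different links. */
	LIST_INSERT_HEAD(&pteg_bucket, p, olink);
	LIST_INSERT_HEAD(&page_chain, p, vlink);

	LIST_FOREACH(p, &page_chain, vlink)
		printf("page 0x%lx mapped at 0x%lx\n", p->pa, p->vaddr);
	return (0);
}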
799 * Initialize the lock that synchronizes access to the pteg and pvo
806 * Initialize the unmanaged pvo pool.
971 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
1030 struct pvo_entry key, *pvo;
1036 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1037 pvo != NULL && PVO_VADDR(pvo) < eva;
1038 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1039 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1040 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1041 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1042 pvo);
1043 pvo->pvo_vaddr &= ~PVO_WIRED;
1044 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
1046 &pvo->pvo_pte.lpte);
1047 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
1054 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1055 pvo->pvo_vpn);
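
The unwire fragment above (lines 1030-1055) walks only the pvo entries inside [sva, eva) by seeding a key and using RB_NFIND/RB_NEXT. Below is a minimal userland sketch of that bounded red-black-tree walk, assuming FreeBSD's <sys/tree.h>; struct demo_pvo and the demo_* names are placeholders, not the pmap types.

/*
 * Sketch of a range-bounded RB walk: RB_NFIND finds the first entry at or
 * above the start key, RB_NEXT advances until the end of the range.
 */
#include <sys/tree.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_pvo {
	RB_ENTRY(demo_pvo) link;
	uintptr_t vaddr;		/* key: mapped virtual address */
};

static int
demo_pvo_cmp(struct demo_pvo *a, struct demo_pvo *b)
{
	return ((a->vaddr > b->vaddr) - (a->vaddr < b->vaddr));
}

RB_HEAD(demo_tree, demo_pvo);
RB_GENERATE_STATIC(demo_tree, demo_pvo, link, demo_pvo_cmp);

int
main(void)
{
	struct demo_tree tree = RB_INITIALIZER(&tree);
	struct demo_pvo *p, key;
	uintptr_t va;

	/* Populate one entry per 4 KB page over a small range. */
	for (va = 0x1000; va < 0x9000; va += 0x1000) {
		p = malloc(sizeof(*p));
		p->vaddr = va;
		RB_INSERT(demo_tree, &tree, p);
	}

	/* Visit only the mappings in [0x3000, 0x6000), as the unwire */
	/* loop above visits [sva, eva). */
	key.vaddr = 0x3000;
	for (p = RB_NFIND(demo_tree, &tree, &key);
	    p != NULL && p->vaddr < 0x6000;
	    p = RB_NEXT(demo_tree, &tree, p))
		printf("visit %#lx\n", (unsigned long)p->vaddr);
	return (0);
}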
1258 /* XXX change the pvo head for fake pages */
1387 struct pvo_entry *pvo;
1391 pvo = moea64_pvo_find_va(pm, va);
1392 if (pvo == NULL)
1395 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1396 (va - PVO_VADDR(pvo));
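
Lines 1395-1396 (and line 1729 later) compute a physical address by combining the real page number from the PTE with the offset of the virtual address within the mapping. The tiny sketch below shows that arithmetic for a base 4 KB page, where subtracting PVO_VADDR(pvo) reduces to masking off the page offset; TOY_PAGE_MASK is a local assumption, not the kernel constant, and large mappings are not handled here.

/* Sketch of pa = (pte_lo & RPGN) | (va - PVO_VADDR(pvo)) for 4 KB pages. */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_MASK	0xfffUL

static uint64_t
toy_extract(uint64_t pte_rpgn, uint64_t va)
{
	/* pte_rpgn is assumed page aligned, as LPTE_RPGN masking ensures. */
	return (pte_rpgn | (va & TOY_PAGE_MASK));
}

int
main(void)
{
	printf("%#llx\n", (unsigned long long)toy_extract(0x200000, 0x7123));
	return (0);
}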
1409 struct pvo_entry *pvo;
1417 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1418 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1419 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1422 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1424 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1540 struct pvo_entry *pvo;
1544 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1545 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1576 struct pvo_entry *pvo;
1594 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1595 pmap = pvo->pvo_pmap;
1597 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1598 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1599 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1600 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1602 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1603 lo |= pvo->pvo_pte.lpte.pte_lo;
1604 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1606 &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1607 if (pvo->pvo_pmap == kernel_pmap)
1646 struct pvo_entry *pvo;
1660 LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1661 pmap = pvo->pvo_pmap;
1663 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1664 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1665 pvo->pvo_pte.lpte.pte_lo |= lo;
1667 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1668 pvo->pvo_vpn);
1669 if (pvo->pvo_pmap == kernel_pmap)
1715 struct pvo_entry *pvo;
1726 pvo = moea64_pvo_find_va(kernel_pmap, va);
1727 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1729 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1778 struct pvo_entry *pvo;
1786 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1787 if (pvo->pvo_pmap == pmap) {
1805 struct pvo_entry *pvo;
1812 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1813 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1925 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
1937 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1942 oldlo = pvo->pvo_pte.lpte.pte_lo;
1943 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1944 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1946 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1948 pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
1950 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1952 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1958 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1959 pvo->pvo_vpn);
1961 (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1964 moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
1965 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
1972 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
1975 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
1977 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
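
The moea64_pvo_protect() fragment above (lines 1942-1950) first clears the PP and no-execute bits of the cached PTE, then rebuilds them from the requested protection: no-execute when execute permission is absent, BW for writable, BR for read-only. The sketch below restates only that translation step; the TOY_* constants are placeholders, not the real LPTE_* values.

/* Sketch of the prot -> PTE low-word translation, under assumed bit values. */
#include <stdint.h>
#include <stdio.h>

#define TOY_PROT_WRITE	0x1
#define TOY_PROT_EXEC	0x2

#define TOY_PTE_BW	0x2	/* read/write */
#define TOY_PTE_BR	0x3	/* read-only */
#define TOY_PTE_PP	0x3	/* protection field mask */
#define TOY_PTE_NOEXEC	0x4

static uint64_t
toy_pte_protect(uint64_t pte_lo, int prot)
{
	pte_lo &= ~(uint64_t)(TOY_PTE_PP | TOY_PTE_NOEXEC);
	if ((prot & TOY_PROT_EXEC) == 0)
		pte_lo |= TOY_PTE_NOEXEC;
	if (prot & TOY_PROT_WRITE)
		pte_lo |= TOY_PTE_BW;
	else
		pte_lo |= TOY_PTE_BR;
	return (pte_lo);
}

int
main(void)
{
	printf("ro:  %#llx\n", (unsigned long long)toy_pte_protect(0, 0));
	printf("rwx: %#llx\n", (unsigned long long)
	    toy_pte_protect(0, TOY_PROT_WRITE | TOY_PROT_EXEC));
	return (0);
}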
1987 struct pvo_entry *pvo, *tpvo, key;
2003 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2004 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2005 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2006 moea64_pvo_protect(mmu, pm, pvo, prot);
2079 struct pvo_entry *pvo, *tpvo;
2083 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2084 if (!(pvo->pvo_vaddr & PVO_WIRED))
2085 moea64_pvo_remove(mmu, pvo);
2097 struct pvo_entry *pvo, *tpvo, key;
2108 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2109 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2110 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2111 moea64_pvo_remove(mmu, pvo);
2124 struct pvo_entry *pvo, *next_pvo;
2128 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2129 pmap = pvo->pvo_pmap;
2131 moea64_pvo_remove(mmu, pvo);
2192 struct pvo_entry *pvo;
2226 * Remove any existing mapping for this page. Reuse the pvo entry if
2231 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2232 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2233 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2234 (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2243 (pvo->pvo_vaddr & PVO_WIRED) == 0) {
2244 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2245 pvo->pvo_vaddr |= PVO_WIRED;
2246 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2249 (pvo->pvo_vaddr & PVO_WIRED) != 0) {
2250 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2251 pvo->pvo_vaddr &= ~PVO_WIRED;
2252 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
2255 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2260 &pvo->pvo_pte.lpte);
2262 PVO_PTEGIDX_SET(pvo, i);
2271 &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2275 moea64_pvo_remove(mmu, pvo);
2289 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2293 pvo = uma_zalloc(zone, M_NOWAIT);
2296 if (pvo == NULL)
2300 pvo->pvo_vaddr = va;
2301 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2303 pvo->pvo_pmap = pm;
2304 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2305 pvo->pvo_vaddr &= ~ADDR_POFF;
2308 pvo->pvo_vaddr |= PVO_WIRED;
2310 pvo->pvo_vaddr |= PVO_MANAGED;
2312 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2314 pvo->pvo_vaddr |= PVO_LARGE;
2316 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2322 RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2331 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2334 if (pvo->pvo_vaddr & PVO_WIRED) {
2335 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2343 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2345 PVO_PTEGIDX_SET(pvo, i);
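
Lines 2289-2296 above show the allocation fallback in the enter path: before the zone allocator is usable, pvo entries come out of the static moea64_bpvo_pool; afterwards they come from uma_zalloc(). The userland sketch below mimics that bootstrap-pool-then-allocator pattern; toy_bootstrapped, toy_bpvo_pool and calloc() standing in for uma_zalloc() are all assumptions made for illustration.

/* Sketch of a bootstrap pool with a later switch to dynamic allocation. */
#include <stdio.h>
#include <stdlib.h>

struct toy_pvo {
	unsigned long vaddr;
};

#define TOY_BPVO_POOL_SIZE	32
static struct toy_pvo toy_bpvo_pool[TOY_BPVO_POOL_SIZE];
static int toy_bpvo_pool_index;
static int toy_bootstrapped;		/* set once the real allocator is up */

static struct toy_pvo *
toy_pvo_alloc(void)
{
	if (!toy_bootstrapped) {
		if (toy_bpvo_pool_index >= TOY_BPVO_POOL_SIZE)
			return (NULL);
		return (&toy_bpvo_pool[toy_bpvo_pool_index++]);
	}
	/* Stand-in for uma_zalloc(zone, M_NOWAIT). */
	return (calloc(1, sizeof(struct toy_pvo)));
}

int
main(void)
{
	struct toy_pvo *early = toy_pvo_alloc();
	struct toy_pvo *late;

	toy_bootstrapped = 1;
	late = toy_pvo_alloc();
	printf("early from pool: %d, late from heap: %d\n",
	    early >= toy_bpvo_pool &&
	    early < toy_bpvo_pool + TOY_BPVO_POOL_SIZE,
	    late != NULL);
	free(late);
	return (0);
}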
2367 moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2372 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2379 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2381 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2382 PVO_PTEGIDX_CLR(pvo);
2390 pvo->pvo_pmap->pm_stats.resident_count--;
2391 if (pvo->pvo_vaddr & PVO_WIRED)
2392 pvo->pvo_pmap->pm_stats.wired_count--;
2397 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2403 LIST_REMOVE(pvo, pvo_olink);
2408 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2410 if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
2411 LIST_REMOVE(pvo, pvo_vlink);
2412 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
2413 if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
2415 if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
2427 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2428 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2429 moea64_upvo_zone, pvo);
2444 struct pvo_entry *pvo;
2448 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2452 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2464 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2467 * See if this pvo has a valid PTE. If so, fetch the
2471 PMAP_LOCK(pvo->pvo_pmap);
2472 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2474 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2475 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2476 PMAP_UNLOCK(pvo->pvo_pmap);
2481 PMAP_UNLOCK(pvo->pvo_pmap);
2492 struct pvo_entry *pvo;
2497 * we can reset the right ones). Note that since the pvo entries and
2505 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
2510 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2511 PMAP_LOCK(pvo->pvo_pmap);
2512 pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2514 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2515 if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2517 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
2518 pvo->pvo_vpn, ptebit);
2521 pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
2522 PMAP_UNLOCK(pvo->pvo_pmap);
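
The query/clear fragments above (lines 2444-2522) walk every mapping of a page, synchronize the cached PTE with the hardware copy, and test or strip the referenced/changed bit. The sketch below keeps only the walk-and-clear shape of that logic; TOY_CHG, struct toy_mapping and toy_clear_bit() are invented names, and the hardware PTE sync is omitted.

/* Sketch: gather a status bit across all mappings of a page, then clear it. */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_CHG	0x80		/* stand-in for LPTE_CHG */

struct toy_mapping {
	LIST_ENTRY(toy_mapping) vlink;
	uint64_t pte_lo;	/* cached copy of the low PTE word */
};

LIST_HEAD(toy_map_head, toy_mapping);

/* Returns nonzero if any mapping of the page had `bit` set; clears it. */
static int
toy_clear_bit(struct toy_map_head *h, uint64_t bit)
{
	struct toy_mapping *m;
	int was_set = 0;

	LIST_FOREACH(m, h, vlink) {
		if (m->pte_lo & bit)
			was_set = 1;
		m->pte_lo &= ~bit;
	}
	return (was_set);
}

int
main(void)
{
	struct toy_map_head head = LIST_HEAD_INITIALIZER(head);
	struct toy_mapping *m = calloc(1, sizeof(*m));

	m->pte_lo = TOY_CHG;
	LIST_INSERT_HEAD(&head, m, vlink);
	printf("modified: %d, again: %d\n",
	    toy_clear_bit(&head, TOY_CHG), toy_clear_bit(&head, TOY_CHG));
	free(m);
	return (0);
}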
2532 struct pvo_entry *pvo, key;
2538 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2540 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2541 if (pvo == NULL ||
2542 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
2604 struct pvo_entry *pvo;
2613 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2614 if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
2615 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
2639 struct pvo_entry *pvo;
2669 pvo = moea64_pvo_find_va(kernel_pmap,
2671 if (pvo != NULL &&
2672 (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
2684 pvo = moea64_pvo_find_va(kernel_pmap,
2686 if (pvo == NULL ||
2687 !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))