Lines Matching refs:pte

185 #define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
186 #define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
187 #define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
188 #define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
189 #define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
191 #define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
192 				    atomic_clear_int((u_int *)(pte), PG_W))
193 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
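The macros at 185-193 test and update single bits of a 32-bit non-PAE pte. Plain reads suffice for the tests, but the wired bit must be set and cleared atomically because the MMU can concurrently write PG_A or PG_M into the same word. A minimal userland sketch of the idiom, assuming the classic i386 flag values and using GCC/Clang __atomic builtins in place of FreeBSD's atomic_set_int/atomic_clear_int:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;		/* 32-bit non-PAE pte */

#define PG_V	0x001			/* valid */
#define PG_W	0x200			/* wired (software-defined bit) */

/* Test a bit without modifying the entry. */
#define pmap_pte_w(pte)	((*(pte) & PG_W) != 0)

/*
 * Set or clear the wired bit atomically so a concurrent hardware
 * update of PG_A/PG_M in the same word is never lost.
 */
static void
pmap_pte_set_w(pt_entry_t *pte, int v)
{
	if (v)
		__atomic_fetch_or(pte, PG_W, __ATOMIC_SEQ_CST);
	else
		__atomic_fetch_and(pte, ~(pt_entry_t)PG_W, __ATOMIC_SEQ_CST);
}

int
main(void)
{
	pt_entry_t pte = PG_V;

	pmap_pte_set_w(&pte, 1);
	printf("wired: %d\n", pmap_pte_w(&pte));	/* 1 */
	pmap_pte_set_w(&pte, 0);
	printf("wired: %d\n", pmap_pte_w(&pte));	/* 0 */
	return (0);
}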
327 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
349 static void pmap_pte_release(pt_entry_t *pte);
383 pt_entry_t *pte, *unused;
446 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
449 pte = vtopte(va);
462 SYSMAP(vm_offset_t, pte, pc->pc_qmap_addr, 1)
678 pt_entry_t *pte;
696 pte = vtopte(va);
697 if (*pte)
698 *pte |= pgeflag;
729 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
737 * - Assumes a vm_offset_t will fit in a pte (true for i386).
743 pt_entry_t *pte;
749 pte = vtopte(va);
750 *head = *pte;
753 *pte = 0;
760 pt_entry_t *pte;
764 pte = vtopte(va);
765 *pte = *head; /* virtual! PG_V is 0 though */
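The fragments from 729-765 show the kva freelist trick: the pte slot of each unmapped kva page is reused to store the next free kva. Kernel addresses are page-aligned, so bit 0 of the stored value is always zero and the hardware can never see PG_V set, which is what the "virtual! PG_V is 0 though" comment at 765 is pointing out. A self-contained sketch of the idea; KVA_BASE, the toy table, and the function names are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define NKVA		4
#define KVA_BASE	0xc0000000u

typedef uint32_t pt_entry_t;
typedef uint32_t vm_offset_t;

static pt_entry_t ptes[NKVA];		/* toy page table for NKVA pages */

static pt_entry_t *
vtopte(vm_offset_t va)
{
	return (&ptes[(va - KVA_BASE) / PAGE_SIZE]);
}

/* Pop a va off the freelist: the pte of the head va stores the next va. */
static vm_offset_t
ptelist_alloc(vm_offset_t *head)
{
	pt_entry_t *pte;
	vm_offset_t va;

	va = *head;
	assert(va != 0);		/* freelist exhausted */
	pte = vtopte(va);
	*head = *pte;			/* advance to the next free va */
	*pte = 0;
	return (va);
}

/*
 * Push a va: store the old head in its pte.  Page-aligned vas have
 * bit 0 clear, so PG_V is never set and the MMU ignores the entry.
 */
static void
ptelist_free(vm_offset_t *head, vm_offset_t va)
{
	*vtopte(va) = *head;
	*head = va;
}

int
main(void)
{
	vm_offset_t head = 0, va;
	int i;

	for (i = 0; i < NKVA; i++)
		ptelist_free(&head, KVA_BASE + i * PAGE_SIZE);
	va = ptelist_alloc(&head);
	printf("allocated kva 0x%x\n", (unsigned)va);
	return (0);
}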
1402 * If the given pmap is not the current or kernel pmap, the returned pte must
1430 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte
1434 pmap_pte_release(pt_entry_t *pte)
1437 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
1512 pt_entry_t *pte;
1522 pte = pmap_pte(pmap, va);
1523 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
1524 pmap_pte_release(pte);
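pmap_extract (1512-1524) recovers a physical address by masking the frame bits out of the pte and splicing in the page offset of the virtual address. The arithmetic in isolation, with the standard i386 masks:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK	0xfffu		/* low 12 bits: offset within page */
#define PG_FRAME	0xfffff000u	/* high 20 bits: physical frame */

int
main(void)
{
	uint32_t pte = 0x12345067;	/* frame 0x12345000 plus flag bits */
	uint32_t va = 0xc00ffabc;
	uint32_t pa;

	/* Flag bits occupy the low 12 bits of the pte; drop them and
	 * take the offset bits from the va instead. */
	pa = (pte & PG_FRAME) | (va & PAGE_MASK);
	printf("pa = 0x%08x\n", (unsigned)pa);	/* 0x12345abc */
	return (0);
}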
1542 pt_entry_t pte, *ptep;
1561 pte = *ptep;
1563 if (pte != 0 &&
1564 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
1565 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
1592 pt_entry_t *pte;
1594 pte = vtopte(va);
1595 pte_store(pte, pa | PG_RW | PG_V | pgeflag);
1601 pt_entry_t *pte;
1603 pte = vtopte(va);
1604 pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
1616 pt_entry_t *pte;
1618 pte = vtopte(va);
1619 pte_clear(pte);
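The fragments at 1592-1619 (the pmap_kenter family and pmap_kremove) install and tear down unmanaged kernel mappings with a single pte store. A sketch of the flag composition; kenter/kremove are simplified stand-ins, and pgeflag models the boot-time choice of PG_G on CPUs with global-page support:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

#define PG_V	0x001
#define PG_RW	0x002
#define PG_G	0x100

static pt_entry_t pgeflag = PG_G;	/* 0 if the CPU lacks PG_G */

static void
pte_store(pt_entry_t *pte, pt_entry_t v)
{
	*pte = v;	/* a single aligned 32-bit store in the kernel */
}

static void
kenter(pt_entry_t *pte, uint32_t pa)
{
	/* Kernel mappings are writable, valid, and global, so they
	 * survive address-space switches in the TLB. */
	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
}

static void
kremove(pt_entry_t *pte)
{
	pte_store(pte, 0);	/* the caller still invalidates the TLB */
}

int
main(void)
{
	pt_entry_t pte = 0;

	kenter(&pte, 0x00200000);
	printf("pte = 0x%08x\n", (unsigned)pte);
	kremove(&pte);
	return (0);
}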
1691 pt_entry_t *endpte, oldpte, pa, *pte;
1695 pte = vtopte(sva);
1696 endpte = pte + count;
1697 while (pte < endpte) {
1700 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
1701 oldpte |= *pte;
1702 pte_store(pte, pa | pgeflag | PG_RW | PG_V);
1704 pte++;
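The batched-mapping loop at 1695-1704 (pmap_qenter in FreeBSD) skips entries that already map the wanted frame with the wanted cache attributes and ORs the old contents of every entry it rewrites into oldpte, so the caller invalidates the TLB only if some previously valid mapping actually changed. A sketch of that change-detection pattern; qenter and the flat frames array are illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

#define PG_V		0x001
#define PG_RW		0x002
#define PG_FRAME	0xfffff000u
#define PG_PTE_CACHE	0x098u		/* PWT, PCD, and PAT bits */

static void
qenter(pt_entry_t *pte, const uint32_t *frames, int count)
{
	pt_entry_t *endpte, oldpte, pa;

	oldpte = 0;
	endpte = pte + count;
	while (pte < endpte) {
		pa = *frames++;
		/* Leave identical mappings alone. */
		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
			oldpte |= *pte;		/* remember what was there */
			*pte = pa | PG_RW | PG_V;
		}
		pte++;
	}
	if ((oldpte & PG_V) != 0)
		printf("a valid mapping changed: invalidate the TLB range\n");
}

int
main(void)
{
	pt_entry_t ptes[2] = { 0x00100000 | PG_RW | PG_V, 0 };
	uint32_t frames[2] = { 0x00200000, 0x00300000 };

	qenter(ptes, frames, 2);
	return (0);
}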
2029 * Here if the pte page isn't mapped, or if it has
2222 pt_entry_t *pte, tpte;
2268 pte = pmap_pte(pmap, va);
2269 tpte = *pte;
2271 tpte = pte_load_clear(pte);
2272 pmap_pte_release(pte);
2276 ("pmap_pv_reclaim: pmap %p va %x zero pte",
2652 pt_entry_t *pte;
2654 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
2655 *pte = newpte;
2893 ("pmap_remove_pde: pte page wire count error"));
2914 ("pmap_remove_pte: pmap %p va %x zero pte", pmap, va));
2941 pt_entry_t *pte;
2946 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
2948 pmap_remove_pte(pmap, pte, va, free);
2963 pt_entry_t *pte;
3046 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3048 if (*pte == 0)
3055 if ((*pte & PG_G) == 0)
3057 if (pmap_remove_pte(pmap, pte, sva, &free))
3089 pt_entry_t *pte, tpte;
3118 pte = pmap_pte_quick(pmap, pv->pv_va);
3119 tpte = pte_load_clear(pte);
3120 KASSERT(tpte != 0, ("pmap_remove_all: pmap %p va %x zero pte",
3199 pt_entry_t *pte;
3289 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
3295 * Regardless of whether a pte is 32 or 64 bits in
3299 obits = pbits = *pte;
3318 if (!atomic_cmpset_64(pte, obits, pbits))
3321 if (!atomic_cmpset_int((u_int *)pte, obits,
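The protection loop at 3289-3321 (pmap_protect) re-reads the entry and retries the compare-and-swap until it wins the race against concurrent hardware updates; as the comment at 3295 notes, the same loop uses atomic_cmpset_64 under PAE and atomic_cmpset_int otherwise. A 32-bit sketch of the retry pattern; pte_write_protect is an illustrative name, and the real code also transfers PG_M to the vm_page before clearing it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

#define PG_V	0x001
#define PG_RW	0x002
#define PG_M	0x040

/* Downgrade one entry to read-only, retrying if the MMU or another
 * CPU changes the entry between the load and the swap. */
static void
pte_write_protect(pt_entry_t *pte)
{
	pt_entry_t obits, pbits;

retry:
	obits = pbits = __atomic_load_n(pte, __ATOMIC_SEQ_CST);
	if ((pbits & PG_V) == 0)
		return;
	pbits &= ~(PG_RW | PG_M);
	if (pbits != obits &&
	    !__atomic_compare_exchange_n(pte, &obits, pbits, false,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		goto retry;
}

int
main(void)
{
	pt_entry_t pte = 0x00500000 | PG_V | PG_RW | PG_M;

	pte_write_protect(&pte);
	printf("pte = 0x%08x\n", (unsigned)pte);	/* PG_RW, PG_M clear */
	return (0);
}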
3358 pt_entry_t *firstpte, oldpte, pa, *pte;
3401 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
3403 oldpte = *pte;
3415 if (!atomic_cmpset_int((u_int *)pte, oldpte,
3486 * that the related pte can not be reclaimed.
3497 pt_entry_t *pte;
3548 pte = pmap_pte_quick(pmap, va);
3555 if (pte == NULL) {
3562 origpte = *pte;
3581 * Remove extra pte reference
3658 * to update the pte.
3666 origpte = pte_load_store(pte, newpte);
3692 pte_store(pte, newpte);
3835 pt_entry_t newpte, *pte;
3890 pte = vtopte(va);
3891 if (*pte) {
3931 pte_store(pte, newpte);
4037 pt_entry_t *pte;
4067 * Regardless of whether a pde (or pte) is 32
4091 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
4093 if ((*pte & PG_V) == 0)
4095 if ((*pte & PG_W) == 0)
4096 panic("pmap_unwire: pte %#jx is missing PG_W",
4097 (uintmax_t)*pte);
4106 atomic_clear_int((u_int *)pte, PG_W);
4494 pt_entry_t *pte;
4502 pte = pmap_pte_quick(pmap, pv->pv_va);
4503 if ((*pte & PG_W) != 0)
4541 pt_entry_t *pte, tpte;
4573 pte = pmap_pde(pmap, pv->pv_va);
4574 tpte = *pte;
4576 pte = vtopte(pv->pv_va);
4577 tpte = *pte & ~PG_PTE_PAT;
4583 pte, pv->pv_va);
4584 panic("bad pte");
4606 pte_clear(pte);
4637 ("pmap_remove_pages: pte page wire count error"));
4705 pt_entry_t *pte;
4715 pte = pmap_pte_quick(pmap, pv->pv_va);
4716 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4735 pt_entry_t *pte;
4742 pte = vtopte(addr);
4743 rv = *pte == 0;
4778 pt_entry_t *pte;
4788 pte = pmap_pte_quick(pmap, pv->pv_va);
4789 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
4808 pt_entry_t oldpte, *pte;
4843 pte = pmap_pte_quick(pmap, pv->pv_va);
4845 oldpte = *pte;
4848 * Regardless of whether a pte is 32 or 64 bits
4852 if (!atomic_cmpset_int((u_int *)pte, oldpte,
4889 pt_entry_t *pte;
4962 pte = pmap_pte_quick(pmap, pv->pv_va);
4963 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4965 if ((*pte & PG_A) != 0) {
4966 atomic_clear_int((u_int *)pte, PG_A);
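The fragments at 4962-4966 (the accessed-bit scan, pmap_ts_referenced by the look of it) harvest the hardware-maintained accessed bit: a set PG_A counts as one reference and is atomically cleared so the next access sets it again, while a PG_M|PG_RW pair marks the page dirty first. A sketch over an array of mappings of one page; ts_referenced is a simplified stand-in:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

#define PG_V	0x001
#define PG_RW	0x002
#define PG_A	0x020
#define PG_M	0x040

static int
ts_referenced(pt_entry_t *ptes, int n)
{
	int i, rtval = 0;

	for (i = 0; i < n; i++) {
		if ((ptes[i] & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
			/* The real code calls vm_page_dirty() here. */
		}
		if ((ptes[i] & PG_A) != 0) {
			__atomic_fetch_and(&ptes[i], ~(pt_entry_t)PG_A,
			    __ATOMIC_SEQ_CST);
			rtval++;
		}
	}
	return (rtval);
}

int
main(void)
{
	pt_entry_t ptes[2] = {
		0x00600000 | PG_V | PG_A,
		0x00700000 | PG_V | PG_RW | PG_M | PG_A,
	};

	printf("referenced: %d\n", ts_referenced(ptes, 2));	/* 2 */
	return (0);
}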
4993 pt_entry_t *pte;
5046 pte = pmap_pte_quick(pmap, sva);
5047 KASSERT((*pte & PG_V) != 0,
5049 pmap_remove_pte(pmap, pte, sva, NULL);
5056 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
5058 if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
5060 else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5067 m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
5070 atomic_clear_int((u_int *)pte, PG_M | PG_A);
5071 } else if ((*pte & PG_A) != 0)
5072 atomic_clear_int((u_int *)pte, PG_A);
5075 if ((*pte & PG_G) != 0) {
5109 pt_entry_t oldpte, *pte;
5146 pte = pmap_pte_quick(pmap, va);
5147 oldpte = *pte;
5150 * Regardless of whether a pte is 32 or 64 bits
5154 while (!atomic_cmpset_int((u_int *)pte,
5157 oldpte = *pte;
5173 pte = pmap_pte_quick(pmap, pv->pv_va);
5174 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5176 * Regardless of whether a pte is 32 or 64 bits
5180 atomic_clear_int((u_int *)pte, PG_M);
5195 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
5204 opte = *(u_int *)pte;
5207 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
5444 pt_entry_t *pte;
5501 pte = vtopte(tmpva);
5502 if (*pte == 0) {
5523 pte = vtopte(tmpva);
5524 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
5525 pmap_pte_attr(pte, cache_bits_pte);
5550 pt_entry_t *ptep, pte;
5559 pte = *pdep;
5566 pte = *ptep;
5568 pa = pte & PG_FRAME;
5572 pte = 0;
5576 if ((pte & PG_V) != 0) {
5578 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5580 if ((pte & PG_A) != 0)
5585 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5674 pt_entry_t *pte;
5678 pte = vtopte(qaddr);
5680 KASSERT(*pte == 0, ("pmap_quick_enter_page: PTE busy"));
5681 *pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
5692 pt_entry_t *pte;
5695 pte = vtopte(qaddr);
5697 KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
5700 *pte = 0;
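pmap_quick_enter_page and pmap_quick_remove_page (5674-5700, named by their KASSERT strings) borrow a reserved per-CPU pte slot for a transient mapping: the assertions enforce that the slot is free on entry and in use on removal, and PG_A and PG_M are pre-set so the MMU never needs to write back into the entry. A single-slot sketch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

#define PG_V	0x001
#define PG_RW	0x002
#define PG_A	0x020
#define PG_M	0x040

static pt_entry_t qmap_pte;	/* one slot; per-CPU in the kernel */

static void
quick_enter(uint32_t pa)
{
	assert(qmap_pte == 0);		/* "PTE busy" */
	/* Pre-set PG_A and PG_M so the hardware never writes here. */
	qmap_pte = pa | PG_V | PG_RW | PG_A | PG_M;
}

static void
quick_remove(void)
{
	assert(qmap_pte != 0);		/* "PTE not in use" */
	qmap_pte = 0;			/* then invalidate the TLB entry */
}

int
main(void)
{
	quick_enter(0x00900000);
	printf("qmap pte = 0x%08x\n", (unsigned)qmap_pte);
	quick_remove();
	return (0);
}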
5723 pt_entry_t *pte;
5738 pte = pmap_pte(pmap, va);
5739 if (pte && pmap_pte_v(pte)) {
5742 pa = *pte;