Lines matching refs:pte in /macosx-10.5.8/xnu-1228.15.4/osfmk/i386/

221 #define	iswired(pte)	((pte) & INTEL_PTE_WIRED)
293 for (every page/pte in the space) {
294 calc pve_ptr from the ppn in the pte
804 * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
806 * the caller is done using the returned pte pointer. When done, the caller
808 * vaddr for the returned pte can no longer be used
813 * return address of mapped pte for vaddr va in pmap pmap.
821 * maps the pde page, if any, containing the pte in and returns
822 * the address of the pte in that mapped page
836 return (vtopte(vaddr)); /* compat kernel still has pte's mapped */
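The comments at lines 804-836 state the contract of the pte mapping routines: pmap_pte() maps the pde page containing the requested pte, if any, and returns a pointer into that mapping, which the caller may use only until it signals it is done. A hedged usage sketch (the matching "done" call is not among these matches, so it is indicated only by a comment):

    pt_entry_t *pte = pmap_pte(pmap, vaddr);   /* maps the pde page, returns pte addr */
    if (pte != PT_ENTRY_NULL) {
        pt_entry_t entry = *pte;               /* use the pointer only while mapped */
        /* ... read or update the entry ... */
    }
    /* after the caller's matching "done" step (not shown in these matches),
     * the vaddr for the returned pte can no longer be used */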
909 pt_entry_t *pte;
929 pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
930 if (pte == PT_ENTRY_NULL) {
933 pmap_store_pte(pte, template);
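Lines 909-933 show the kernel-mapping idiom: look up the pte slot for a kernel virtual address and store a prebuilt template into it. A minimal sketch of building such a template from a physical address, assuming a pa_to_pte()-style frame macro (the flag bits are the ones used at lines 1260 and 2696 below):

    pt_entry_t template, *pte;

    /* pa_to_pte() is assumed here as the physical-frame mask */
    template = pa_to_pte(pa) | INTEL_PTE_VALID | INTEL_PTE_RW | INTEL_PTE_WIRED;
    pte = pmap_pte(kernel_pmap, (vm_map_offset_t)virt);
    if (pte == PT_ENTRY_NULL)
        panic("kernel va has no pte");
    pmap_store_pte(pte, template);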
1137 pt_entry_t *pte;
1199 v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
1202 pte = vtopte(va);
1254 pte = pmap_pte(kernel_pmap, 0);
1255 pmap_store_pte(pte, INTEL_PTE_INVALID);
1258 if (0 == (pte = pmap_pte(kernel_pmap,0x2000))) panic("lowmem pte");
1260 pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)|INTEL_PTE_VALID|INTEL_PTE_REF|INTEL_PTE_MOD|INTEL_PTE_WIRED|INTEL_PTE_RW);
1662 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1671 panic("pmap_create pte obj");
1751 * Set the bottom 4 3rd-level pte's to be the kernel's.
1911 * The virtual address is the va for the first pte.
1915 * entirely within one pte-page. This is NOT checked.
1916 * Assumes that the pte-page exists.
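Lines 1911-1916 state an unchecked precondition: the range handed to the remover must lie entirely within one pte-page. A sketch of what such a check would look like, assuming a pde_mapped_size-style constant (hypothetical here, taken to be the power-of-two span covered by one pte page):

    /* first and last byte of [s_va, e_va) must share a page-directory slot */
    assert((s_va & ~(pde_mapped_size - 1)) == ((e_va - 1) & ~(pde_mapped_size - 1)));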
2268 pt_entry_t *pte;
2323 pte = pmap_pte(pmap, vaddr);
2325 if (0 == pte) {
2340 pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
2344 pmap_phys_attributes[pai] |= *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
2346 pmap_store_pte(pte, 0);
2387 pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WRITE));
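The call pattern pmap_update_pte(pte, *pte, new) at lines 2340, 2387, 2696 and elsewhere suggests compare-and-swap semantics: the update applies only if the entry still holds the value just read, so hardware updates to the REF/MOD bits racing in are not lost. A hedged reconstruction of the retry idiom for line 2387 (clearing the write bit), using OSCompareAndSwap64() as a stand-in for the kernel's atomic primitive; this is inferred from the call sites, not copied from the source:

    #include <libkern/OSAtomic.h>

    pt_entry_t old;

    do {
        old = *pte;                 /* re-read; the MMU may set REF/MOD bits */
    } while (!OSCompareAndSwap64(old, old & ~INTEL_PTE_WRITE,
                                 (volatile UInt64 *)pte));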
2557 * that the related pte cannot be reclaimed.
2572 register pt_entry_t *pte;
2622 * Expand pmap to include this pte. Assume that
2627 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
2636 old_pa = pte_to_pa(*pte);
2649 old_pa = pte_to_pa(*pte);
2685 if (!iswired(*pte))
2689 if (iswired(*pte)) {
2696 pmap_update_pte(pte, *pte, template | (*pte & (INTEL_PTE_REF | INTEL_PTE_MOD)));
2727 pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_VALID));
2731 oattr = (char)(*pte & (PHYS_MODIFIED | PHYS_REFERENCED));
2733 pmap_store_pte(pte, 0);
2744 if (iswired(*pte)) {
2829 if (iswired(*pte)) {
2948 pmap_store_pte(pte, template);
2951 * to prevent pmap_page_protect et al from finding it until the pte
2987 register pt_entry_t *pte;
2995 if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL)
2996 panic("pmap_change_wiring: pte missing");
2998 if (wired && !iswired(*pte)) {
3003 pmap_update_pte(pte, *pte, (*pte | INTEL_PTE_WIRED));
3005 else if (!wired && iswired(*pte)) {
3011 pmap_update_pte(pte, *pte, (*pte & ~INTEL_PTE_WIRED));
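Lines 2987-3011 excerpt pmap_change_wiring(): wiring sets INTEL_PTE_WIRED so the translation cannot be reclaimed (cf. the comment at line 2557), and unwiring clears it. A short caller sketch; map and vaddr are placeholders:

    pmap_change_wiring(map, vaddr, TRUE);    /* sets INTEL_PTE_WIRED */
    /* ... the mapping must stay resident through this region ... */
    pmap_change_wiring(map, vaddr, FALSE);   /* clears INTEL_PTE_WIRED */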
3432 * If the pte page has any wired mappings, we cannot
3447 * Remove the virtual addresses mapped by this pte page.
3462 * And free the pte page itself.
3471 panic("pmap_collect: pte page not in object");
3540 register pt_entry_t *pte;
3589 pte = pmap_pte(pmap, va);
3590 pmap_update_pte(pte, *pte, (*pte & ~bits));
3623 register pt_entry_t *pte;
3687 pte = pmap_pte(pmap, va);
3688 attributes |= *pte & bits;
4126 pt_entry_t pte;
4134 pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
4135 pte &= ~INTEL_PTE_WRITE; // ensure read only
4139 pmap_store_pte(npte, pte);
4161 panic("pmap_commpage64_init pte");
4179 pt_entry_t *pte;
4211 while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
4213 * (int *) pte = 0;
4215 cp->mapwindow[i].prv_CMAP = pte;
4481 vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
4488 pmap_store_pte(pte_unique_base + a, pte);