Lines Matching defs:pte

412 * one of the pte mapping routines (e.g. pmap_pte()) as the returned vaddr
414 * the caller is done using the returned pte pointer. When done, the caller
416 * vaddr for the returned pte can no longer be used
421 * return address of mapped pte for vaddr va in pmap pmap.
429 * maps in the pde page, if any, containing the pte, and returns
430 * the address of the pte in that mapped page
446 return (vtopte(vaddr)); /* compat kernel still has ptes mapped */
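
The matched comments above (lines 412-430) describe the contract of the pte lookup path: pmap_pte() hands back a mapped virtual address for the pte, valid only until the caller releases it, while the compat path at line 446 can simply use vtopte() because that kernel still keeps its ptes mapped. For reference, a minimal sketch of the classic recursive-mapping form of vtopte(), assuming the usual PTmap recursive slot and the i386_btop() byte-to-page macro (illustrative, not necessarily the exact macro in this source):

    /*
     * Classic recursive-map lookup: with the page tables mapped
     * at the fixed PTmap slot, the pte for a virtual address is
     * PTmap indexed by that address's virtual page number.
     */
    extern pt_entry_t *PTmap;
    #define vtopte(va)      (PTmap + i386_btop(va))
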
734 pt_entry_t *pte;
735 pte = vtopte(va);
737 v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
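
Line 737 is the body of a bootstrap allocator macro: it returns the current cursor va cast to the caller's type, then advances va by n pages and the pte cursor (initialized from vtopte(va) at line 735) by n entries, keeping the two in lockstep. A hedged reconstruction, with the macro name and parameter list assumed from the fragment:

    /*
     * Assumed wrapper for the body on line 737: v gets the
     * current va cast to type c; va and the matching pte
     * cursor both advance by n pages/entries.
     */
    #define ALLOC_BOOT_PAGES(v, c, p, n) \
            v = (c)va; va += ((n)*INTEL_PGBYTES); p = pte; pte += (n)
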
1158 pde |= pte_phys; /* take page frame from pte */
1167 * Free the now-unused level-1 pte.
1168 * Note: ptep is a virtual address to the pte in the
1175 DBG("ml_static_mfree(%p,0x%x) for pte\n",
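
Lines 1158-1175 come from the path that collapses a fully mapped level-1 page table into a single large-page pde: the pde inherits the page frame from the pte, the table page becomes unused, and it is handed back with ml_static_mfree(). A minimal sketch of that sequence, with the page-size and permission flags assumed and all locking omitted:

    pde |= pte_phys;                        /* take page frame from pte (line 1158) */
    pde |= INTEL_PTE_PS;                    /* assumed: mark the pde as a large page */
    pmap_store_pte(pdep, pde);
    /* ptep is a kernel virtual address to the now-unused level-1 pte page */
    DBG("ml_static_mfree(%p,0x%x) for pte\n", (void *)ptep, PAGE_SIZE);
    ml_static_mfree((vm_offset_t)ptep, PAGE_SIZE);
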
1188 pt_entry_t *pte = NULL;
1189 if (0 == (pte = pmap_pte(kernel_pmap,
1191 panic("lowmem pte");
1194 pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
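
Lines 1188-1194 wire up the low-memory globals page: look up the kernel pte for the fixed lowGlo address, panic if no page table backs it, and store a pte built from lowGlo's physical address. A hedged completion of the truncated statements; LOWGLO_VADDR and the flag choice are assumptions, not names from this source:

    pt_entry_t *pte = NULL;

    if (0 == (pte = pmap_pte(kernel_pmap,
                             (vm_map_offset_t)LOWGLO_VADDR)))   /* assumed constant */
            panic("lowmem pte");
    /* assumed flags: a valid, writable kernel mapping */
    pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo)
                        | INTEL_PTE_VALID | INTEL_PTE_WRITE);
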
1394 /* allocate the vm_objs to hold the pdpt, pde and pte pages */
1403 panic("pmap_create pte obj");
1483 * Set the bottom 4 3rd-level ptes to be the kernel's.
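
Lines 1394-1483 are from pmap_create(): vm_objects are allocated to back the pdpt, pde, and pte pages (panicking at line 1403 if the pte object cannot be created), and the kernel's low mappings are then shared by copying its four third-level entries into the new pmap. A sketch of that last step, with the variable names assumed:

    /* share the kernel's bottom 4 third-level entries (line 1483) */
    for (i = 0; i < 4; i++)
            pmap_store_pte(pdpt + i, kernel_pdpt[i]);
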
2170 * If the pte page has any wired mappings, we cannot
2185 * Remove the virtual addresses mapped by this pte page.
2200 * And free the pte page itself.
2209 panic("pmap_collect: pte page not in object");
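
Lines 2170-2209 outline the reclamation policy in pmap_collect(): a pte page with wired mappings is skipped, otherwise the virtual range it maps is torn down, the pde that pointed at it is cleared, and the page itself is looked up in the pmap's pte object and freed, with a panic if it is unexpectedly absent. A structural sketch under those assumptions; the helper and field names are illustrative:

    if (pte_page_wired_count != 0)              /* assumed wired check (line 2170) */
            continue;                           /* cannot reclaim a wired pte page */
    pmap_remove_range(pmap, start_va, spte, epte);  /* drop its mappings (line 2185) */
    pmap_store_pte(pdep, 0);                    /* unhook the pde */
    m = vm_page_lookup(pmap->pm_obj, pte_page_offset);  /* assumed object/offset */
    if (m == VM_PAGE_NULL)
            panic("pmap_collect: pte page not in object");
    VM_PAGE_FREE(m);                            /* free the pte page itself (line 2200) */
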
2364 pt_entry_t pte;
2372 pte = *opte | INTEL_PTE_USER|INTEL_PTE_GLOBAL;
2373 pte &= ~INTEL_PTE_WRITE; // ensure read only
2377 pmap_store_pte(npte, pte);
2399 panic("pmap_commpage64_init pte");
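
Lines 2364-2399 assemble the 64-bit commpage ptes: each source pte is copied, marked user-accessible and global, stripped of write permission, and stored into the new slot. Reassembling the listed fragments into one sequence (opte/npte are the source and destination pte pointers; the surrounding loop and lookups are omitted):

    pt_entry_t pte;

    if (opte == NULL)
            panic("pmap_commpage64_init pte");
    pte = *opte | INTEL_PTE_USER | INTEL_PTE_GLOBAL;  /* user-visible, global TLB entry */
    pte &= ~INTEL_PTE_WRITE;                          /* ensure read only */
    pmap_store_pte(npte, pte);
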
2417 pt_entry_t *pte;
2449 while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
2451 * (int *) pte = 0;
2453 cp->mapwindow[i].prv_CMAP = pte;
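
Lines 2417-2453 set up a per-cpu mapping window: the loop at line 2449 retries pmap_pte() until a kernel pte exists for the window's address (growing the kernel page tables in between, elided from the listing), the pte is zeroed so the window starts invalid, and the pte pointer is cached in the window's prv_CMAP slot. A sketch with the expansion step assumed:

    while ((pte = pmap_pte(kernel_pmap, (vm_map_offset_t)address)) == 0)
            pmap_expand(kernel_pmap, (vm_map_offset_t)address);  /* assumed */
    * (int *) pte = 0;                  /* window starts out invalid */
    cp->mapwindow[i].prv_CMAP = pte;    /* remember where to store the window pte */
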
2566 vm_offset_t pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
2573 pmap_store_pte(pte_unique_base + a, pte);
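
Lines 2566-2573 show pmap_high_map(): the caller supplies a ready-made pte and a high-memory slot type, and the pte is stored at a fixed per-cpu slot off pte_unique_base. A minimal sketch of the whole routine, assuming the usual per-cpu slot arithmetic and a pmap_index_to_virt()-style helper for the returned address (both assumptions):

    vm_offset_t
    pmap_high_map(pt_entry_t pte, enum high_cpu_types e)
    {
            /* assumed: one slot per high_cpu_type per cpu */
            enum high_fixed_addresses a = e + HIGH_CPU_END * cpu_number();
            vm_offset_t vaddr = pmap_index_to_virt(HIGH_FIXED_CPUS_BEGIN + a);

            pmap_store_pte(pte_unique_base + a, pte);   /* install the caller's pte */
            invlpg((uintptr_t)vaddr);                   /* assumed: flush stale TLB entry */
            return vaddr;
    }
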