/macosx-10.5.8/xnu-1228.15.4/osfmk/ppc/

Lines matching references to "va":

220  * pmap_map(va, spa, epa, prot)
222 * A virtual address range starting at "va" is mapped to the physical
226 * "va", "spa", and "epa" are byte addresses and must be on machine
238 vm_offset_t va,
251 if (spa == epa) return(va);
255 colladr = mapping_make(kernel_pmap, (addr64_t)va, (ppnum_t)(spa >> 12),
259 panic("pmap_map: attempt to map previously mapped range - va = %08X, pa = %08X, epa = %08X, collision = %016llX\n",
260 va, spa, epa, colladr);
262 return(va);
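
The pmap_map() excerpt above hands mapping_make() a physical page number built from the byte address spa via (ppnum_t)(spa >> 12) on line 255. A minimal standalone sketch of that 4 KB byte-address/page-number conversion (illustration only, not xnu code; the uint32_t page number is a local stand-in for ppnum_t):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t spa   = 0x12345678ULL;             /* example physical byte address */
        uint32_t ppnum = (uint32_t)(spa >> 12);     /* 4 KB page number, as in (ppnum_t)(spa >> 12) */
        uint64_t back  = ((uint64_t)ppnum << 12)    /* page number back to a byte address ... */
                       | (spa & 0xFFFULL);          /* ... plus the offset within the page */

        printf("spa = 0x%llx  page = 0x%x  back = 0x%llx\n",
               (unsigned long long)spa, ppnum, (unsigned long long)back);
        return 0;
    }
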
293 panic ("pmap_map_physical: mapping failure - va = %016llX, pa = %016llX, size = %016llX, collision = %016llX\n",
326 panic ("pmap_map_iohole: mapping failed - va = %016llX, pa = %016llX, size = %016llX, collision = %016llX\n",
911 addr64_t va, endva;
925 va = sva & -4096LL; /* Round start down to a page */
929 va = mapping_remove(pmap, va); /* Remove the mapping and see what's next */
930 va = va & -4096LL; /* Make sure the "not found" indication is clear */
931 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
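
Lines 925-931 show the shape of pmap_remove()'s loop: round the start down to a page with & -4096LL, let the removal primitive return the next address to examine, clear the low bits it may use as a "not found" indication, and stop at zero or at the end of the range. A hedged standalone sketch of that loop, with remove_one() as a hypothetical stand-in for mapping_remove():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for mapping_remove(): pretends every page is
     * mapped and simply reports the following page as the next candidate. */
    static uint64_t remove_one(uint64_t va)
    {
        printf("removing mapping at 0x%llx\n", (unsigned long long)va);
        return va + 4096;
    }

    static void remove_range(uint64_t sva, uint64_t endva)
    {
        uint64_t va = sva & -4096LL;                   /* round start down to a page */
        for (;;) {
            va = remove_one(va);                       /* remove and get the next candidate */
            va = va & -4096LL;                         /* clear any "not found" indication */
            if ((va == 0) || (va >= endva)) break;     /* finished the range or ran off the end */
        }
    }

    int main(void)
    {
        remove_range(0x1000ULL, 0x4000ULL);
        return 0;
    }

The pmap_protect() loop at lines 1075-1080 follows the same pattern, with mapping_protect() reporting the next address through its last argument.
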
1066 addr64_t va, endva;
1075 va = sva & -4096LL; /* Round start down to a page */
1079 mapping_protect(pmap, va, (prot & VM_PROT_ALL), &va); /* Change the protection and see what's next */
1080 if((va == 0) || (va >= endva)) break; /* End loop if we finish range or run off the end */
1101 pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
1120 colva = mapping_make(pmap, va, pa, mflags, 1, (prot & VM_PROT_ALL) ); /* Enter the mapping into the pmap */
1148 void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1158 // kprintf("pmap_map_block: (%08X) va = %016llX, pa = %08X, size = %08X, prot = %08X, attr = %08X, flags = %08X\n", /* (BRINGUP) */
1159 // current_thread(), va, pa, size, prot, attr, flags); /* (BRINGUP) */
1164 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1167 panic("pmap_map_block: mapping error %d, pmap = %p, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
1173 int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1186 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
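
pmap_enter(), pmap_map_block(), and pmap_map_block_rc() all funnel into mapping_make(); what differs is the handling of a collision: pmap_map_block() panics (line 1167, masking the result with mapRetCode), while the _rc variant hands the code back to its caller. A hedged standalone sketch of those two calling styles, with make_mapping() and RETCODE_MASK as hypothetical stand-ins for mapping_make() and mapRetCode:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RETCODE_MASK 0xFFULL     /* hypothetical stand-in for mapRetCode */

    /* Hypothetical stand-in for mapping_make(): returns 0 on success,
     * otherwise a value whose low bits carry an error code. */
    static uint64_t make_mapping(uint64_t va, uint32_t pa, uint32_t pages)
    {
        (void)va; (void)pa; (void)pages;
        return 0;                    /* pretend the mapping always succeeds */
    }

    /* pmap_map_block() style: treat any collision as fatal. */
    static void map_block_or_die(uint64_t va, uint32_t pa, uint32_t pages)
    {
        uint64_t colva = make_mapping(va, pa, pages);
        if (colva != 0) {
            fprintf(stderr, "mapping error %u at va 0x%llx\n",
                    (unsigned)(colva & RETCODE_MASK), (unsigned long long)va);
            abort();                 /* analogous to panic() in the kernel */
        }
    }

    /* pmap_map_block_rc() style: report the code, let the caller decide. */
    static int map_block_rc(uint64_t va, uint32_t pa, uint32_t pages)
    {
        uint64_t colva = make_mapping(va, pa, pages);
        return (int)(colva & RETCODE_MASK);    /* 0 means success */
    }

    int main(void)
    {
        map_block_or_die(0x10000, 0x1234, 4);
        printf("rc = %d\n", map_block_rc(0x20000, 0x5678, 4));
        return 0;
    }
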
1194 * pmap_extract(pmap, va)
1196 * virtual address specified by pmap and va if the
1202 vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va) {
1215 gva = (unsigned int)va; /* Make sure we don't have a sign */
1226 ppoffset = (ppnum_t)(((gva & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */
1236 pa = (pa << 12) | (va & 0xFFF); /* Convert physical page number to address */
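
Lines 1226 and 1236 carry the whole address translation in pmap_extract(): the page offset of va from the mapping's base virtual address selects a page relative to the mapping's base, and the low 12 bits of va are tacked back on to form a byte address (pmap_find_phys() at line 1267 repeats the same page-offset computation). A standalone sketch of that arithmetic with made-up example values, assuming, as in the usual pattern, that the backing physical page is the mapping's base page plus the same offset:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t va       = 0x00007F0000012ABCULL;  /* address being translated (example) */
        uint64_t mpVAddr  = 0x00007F0000010000ULL;  /* mapping's base virtual address (example) */
        uint32_t basePage = 0x4200;                 /* mapping's base physical page (example) */

        /* Page offset of va from the mapping's base va, as on line 1226. */
        uint32_t ppoffset = (uint32_t)(((va & -4096LL) - (mpVAddr & -4096LL)) >> 12);

        uint32_t pa = basePage + ppoffset;          /* physical page backing va (assumed pattern) */

        /* Physical page number to byte address, as on line 1236. */
        uint64_t paddr = ((uint64_t)pa << 12) | (va & 0xFFF);

        printf("ppoffset = %u  page = 0x%x  paddr = 0x%llx\n",
               ppoffset, pa, (unsigned long long)paddr);
        return 0;
    }
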
1243 * ppnum_t pmap_find_phys(pmap, addr64_t va)
1245 * virtual address specified by pmap and va if the
1250 ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {
1259 mp = mapping_find(pmap, va, &nextva, 1); /* Find the mapping for this address */
1267 ppoffset = (ppnum_t)(((va & -4096LL) - (mp->mpVAddr & -4096LL)) >> 12); /* Get offset from va to base va */
1452 __unused vm_map_offset_t va,
1725 panic("pmap_unnest: Attempt to unnest an unnested segment - va = %016llX\n", vaddr);
1729 panic("pmap_unnest: Attempt to unnest something that is not a nest - va = %016llX\n", vaddr);
1733 panic("pmap_unnest: Attempt to unnest something that is not at start of nest - va = %016llX\n", vaddr);
1837 * addr64_t MapUserMemoryWindow(vm_map_t map, vm_offset_t va, size)
1840 * va = start of the address range we are mapping
1887 addr64_t va) {
1893 baddrs = va & 0xFFFFFFFFF0000000ULL; /* Isolate the segment */
1899 return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr); /* Pass back the kernel address we are to use */
1919 return ((va & 0x0FFFFFFFULL) | lowGlo.lgUMWvaddr); /* Pass back the kernel address we are to use */
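
MapUserMemoryWindow() works in 256 MB segments: line 1893 masks off everything below the segment boundary (va & 0xFFFFFFFFF0000000ULL), and lines 1899 and 1919 rebuild the caller-usable kernel address by keeping the low 28 bits of va (the offset within that segment) and ORing them onto the window base kept in lowGlo.lgUMWvaddr. A standalone sketch of that bit slicing, with UMW_BASE as a made-up value standing in for lgUMWvaddr:

    #include <stdint.h>
    #include <stdio.h>

    #define UMW_BASE 0x00000000E0000000ULL   /* made-up stand-in for lowGlo.lgUMWvaddr */

    int main(void)
    {
        uint64_t va      = 0x0000000123456789ULL;        /* user address (example) */
        uint64_t segment = va & 0xFFFFFFFFF0000000ULL;   /* 256 MB segment, as on line 1893 */
        uint64_t offset  = va & 0x0FFFFFFFULL;           /* offset within that segment */
        uint64_t kva     = offset | UMW_BASE;            /* window address, as on lines 1899/1919 */

        printf("segment = 0x%llx  offset = 0x%llx  kva = 0x%llx\n",
               (unsigned long long)segment, (unsigned long long)offset,
               (unsigned long long)kva);
        return 0;
    }
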
1929 kern_return_t dtrace_copyio_preflight(__unused addr64_t va)
1937 kern_return_t dtrace_copyio_postflight(__unused addr64_t va)
2079 __unused vm_offset_t va)