Lines Matching defs:pmap in /macosx-10.10/xnu-2782.1.97/osfmk/i386/

34 #include <vm/pmap.h>
41 * pmap locking
44 #define PMAP_LOCK(pmap) { \
45 simple_lock(&(pmap)->lock); \
48 #define PMAP_UNLOCK(pmap) { \
49 simple_unlock(&(pmap)->lock); \
52 #define PMAP_UPDATE_TLBS(pmap, s, e) \
53 pmap_flush_tlbs(pmap, s, e, 0, NULL)
58 #define PMAP_UPDATE_TLBS_DELAYED(pmap, s, e, c) \
59 pmap_flush_tlbs(pmap, s, e, PMAP_DELAY_TLB_FLUSH, c)
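Taken together, these macros express the usual mutate-then-flush pattern: take the pmap's simple lock, rewrite the PTEs, then have pmap_flush_tlbs() shoot down stale translations on every CPU using the pmap, either immediately or deferred via the _DELAYED variant. A minimal sketch of a caller, assuming pmap_pte() and pmap_store_pte() as they appear elsewhere in the file; the function itself is illustrative, not xnu code:

    static void
    example_clear_range(pmap_t pmap, vm_map_offset_t s, vm_map_offset_t e)
    {
            vm_map_offset_t va;
            pt_entry_t      *pte;

            PMAP_LOCK(pmap);
            for (va = s; va < e; va += PAGE_SIZE) {
                    pte = pmap_pte(pmap, va);       /* walk to the leaf PTE */
                    if (pte != NULL)
                            pmap_store_pte(pte, 0); /* drop the mapping */
            }
            PMAP_UPDATE_TLBS(pmap, s, e);           /* shoot down [s, e) */
            PMAP_UNLOCK(pmap);
    }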
134 pmap_t pmap;
145 removing a panic from the code (in the case of the kernel pmap we cannot block
159 if (this pv is for this pmap/vaddr) {
181 structure accessed on a hash list via a hash of [pmap, vaddr]. These have been
198 pmap_t pmap;
205 pmap_t pmap;
223 pmap_t pmap; /* pmap where mapping lies */
232 pmap_t pmap;
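The pmap_t fields above live inside the physical-to-virtual (pv) entries that the comment at line 181 describes. A plausible reconstruction of the two entry flavors, with field order and any name other than pmap/va taken as assumptions:

    typedef struct pv_rooted_entry {        /* first mapping of a page     */
            queue_head_t    qlink;
            vm_map_offset_t va;             /* virtual address for mapping */
            pmap_t          pmap;           /* pmap where mapping lies     */
    } *pv_rooted_entry_t;

    typedef struct pv_hashed_entry {        /* additional mappings, found  */
            queue_head_t            qlink;  /* via a hash of [pmap, vaddr] */
            vm_map_offset_t         va;
            pmap_t                  pmap;
            ppnum_t                 ppn;
            struct pv_hashed_entry  *nexth; /* hash-bucket chain           */
    } *pv_hashed_entry_t;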
417 * There are two structures in the pmap module that need locking:
420 * for the list in question.) Most routines want to lock a pmap and
426 * The system wide pmap lock has been removed. Now, paths take a lock
427 * on the pmap before changing its 'shape' and the reverse order lockers
431 * lets the pmap layer run (nearly completely) interrupt enabled, unlike
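The comment sketches a two-lock discipline: forward-order paths lock the pmap before changing its shape, while reverse-order paths arrive holding a pv bucket lock and cannot block on the pmap lock without inverting the order. One way such a path stays deadlock-free is a try-lock with retry; the sketch below is illustrative only, and LOCK_PV_HASH/UNLOCK_PV_HASH are assumed bucket-lock macros:

    static void
    example_reverse_order_locker(pv_hashed_entry_t pvh_e)
    {
            int idx;
    retry:
            idx = pvhashidx(pvh_e->pmap, pvh_e->va);
            LOCK_PV_HASH(idx);              /* bucket lock held first...    */
            if (!simple_lock_try(&pvh_e->pmap->lock)) {
                    UNLOCK_PV_HASH(idx);    /* ...so only try-lock the pmap, */
                    goto retry;             /* backing off on failure        */
            }
            /* ... examine or edit the mapping ... */
            simple_unlock(&pvh_e->pmap->lock);
            UNLOCK_PV_HASH(idx);
    }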
468 /* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */
472 panic("pmap interrupt assert %s, %d",__FILE__, __LINE__); \
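Lines 468 and 472 are the two visible pieces of a debug-only interrupt assertion; a plausible reconstruction of the macro they bracket (the name pmap_intr_assert and the exact guard condition are inferred):

    #ifdef DEBUGINTERRUPTS
    #define pmap_intr_assert() {                                               \
            if (processor_avail_count > 1 && !ml_get_interrupts_enabled())     \
                    panic("pmap interrupt assert %s, %d", __FILE__, __LINE__); \
    }
    #else
    #define pmap_intr_assert()
    #endif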
482 pvhashidx(pmap_t pmap, vm_map_offset_t va)
484 uint32_t hashidx = ((uint32_t)(uintptr_t)pmap ^
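Line 484 begins the bucket hash: it XORs the pmap pointer with the virtual page number, then masks the result down to the table size. A hedged completion of the function, with the npvhashmask mask assumed from context:

    static inline uint32_t
    pvhashidx(pmap_t pmap, vm_map_offset_t va)
    {
            uint32_t hashidx = ((uint32_t)(uintptr_t)pmap ^
                ((uint32_t)(va >> PAGE_SHIFT) & 0xFFFFFFFF)) &
                npvhashmask;
            return hashidx;
    }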
504 pvhash_idx = pvhashidx(pvh->pmap, pvh->va);
533 pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
552 pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
571 * identifiable as occurring due to issues beyond the control of the pmap module.
610 pmap_t pmap;
625 pmap_pagetable_corruption_log(pmap_pv_assertion_t incident, pmap_pagetable_corruption_t suppress_reason, pmap_pagetable_corruption_action_t action, pmap_t pmap, vm_map_offset_t vaddr, pt_entry_t *ptep, ppnum_t ppn, pmap_t pvpmap, vm_map_offset_t pvva) {
631 pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].pmap = pmap;
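Line 631 stores into a record array indexed by a running log index, i.e. a small ring buffer of corruption incidents. A sketch of the record type and the wrap-around append, with the field set and buffer depth as assumptions:

    typedef struct {
            pmap_pv_assertion_t                incident;
            pmap_pagetable_corruption_t        reason;
            pmap_pagetable_corruption_action_t action;
            pmap_t                             pmap;
            vm_map_offset_t                    vaddr;
            pt_entry_t                         pte;
            ppnum_t                            ppn;
            pmap_t                             pvpmap;
            vm_map_offset_t                    pvva;
            uint64_t                           abstime;
    } pmap_pagetable_corruption_record_t;

    #define PMAP_PAGETABLE_CORRUPTION_MAX_LOG 8     /* assumed depth */
    /* after filling the current slot, advance and wrap the index: */
    pmap_pagetable_corruption_log_index =
        (pmap_pagetable_corruption_log_index + 1) %
        PMAP_PAGETABLE_CORRUPTION_MAX_LOG;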
643 pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t *ppnp, pt_entry_t *ptep, pmap_pv_assertion_t incident) {
653 pmap_t pvpmap = pv_h->pmap;
673 if ((popcnt1((uintptr_t)pv_e->pmap ^ (uintptr_t)pmap) && pv_e->va == vaddr) ||
674 (pv_e->pmap == pmap && popcnt1(pv_e->va ^ vaddr))) {
675 pv_e->pmap = pmap;
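Lines 673-675 are the interesting heuristic: a lookup miss is classified as a probable single-bit flip when either the stored pmap pointer or the stored va differs from the expected value in exactly one bit, and line 675 then repairs the pointer in place. That makes popcnt1 an "exactly one bit set" predicate over the XOR distance; a minimal sketch:

    /* TRUE iff x has exactly one bit set, i.e. two values whose XOR
     * distance is x differ by a single flipped bit. */
    static inline boolean_t
    popcnt1(uint64_t x)
    {
            return (x != 0) && ((x & (x - 1)) == 0);
    }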
691 if (npv_h->va == vaddr && npv_h->pmap == pmap) {
702 if (pmap == kernel_pmap) {
717 else if ((pmap != kernel_pmap) && ((cpte & INTEL_PTE_USER) == 0)) {
736 pmap_pagetable_corruption_log(incident, suppress_reason, action, pmap, vaddr, &cpte, *ppnp, pvpmap, pvva);
746 pmap_pv_remove(pmap_t pmap,
763 if (__improbable(pv_h->pmap == PMAP_NULL)) {
764 pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_ABSENT);
768 panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p, %p): null pv_list!", pmap, vaddr, ppn, *pte, ppnp, pte);
778 if (pv_h->va == vaddr && pv_h->pmap == pmap) {
793 pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
800 pmap, vaddr, ppn);
804 pv_h->pmap = pvh_e->pmap;
808 pv_h->pmap = PMAP_NULL;
817 pvhash_idx = pvhashidx(pmap, vaddr);
822 pmap, vaddr, ppn, *pte, pte);
829 if (pvh_e->pmap == pmap &&
838 pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_PRESENT);
841 panic("Possible memory corruption: pmap_pv_remove(%p, 0x%llx, 0x%x, 0x%llx, %p, %p): pv not on hash, head: %p, 0x%llx", pmap, vaddr, ppn, *pte, ppnp, pte, pv_h->pmap, pv_h->va);
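Read in order, the pmap_pv_remove fragments (lines 746-841) trace three paths: an empty root (line 763) is handed to the corruption classifier; a root hit (line 778) promotes a hashed entry into the root (line 804) or empties it (line 808); otherwise the [pmap, vaddr] bucket is searched (lines 817-829), and a miss there is again treated as possible corruption (lines 838-841). A condensed, illustrative outline with locking and error detail elided; have_hashed_entries, unhash, and search_bucket are placeholders:

    if (pv_h->pmap == PMAP_NULL) {
            /* page claims no mappings: classify as corruption */
            pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte,
                                                     ROOT_ABSENT);
    } else if (pv_h->va == vaddr && pv_h->pmap == pmap) {
            if (have_hashed_entries) {
                    unhash(pvh_e);          /* promote into the root */
                    pv_h->va = pvh_e->va;
                    pv_h->pmap = pvh_e->pmap;
            } else {
                    pv_h->pmap = PMAP_NULL; /* last mapping removed  */
            }
    } else {
            pvh_e = search_bucket(pvhashidx(pmap, vaddr), pmap, vaddr);
            /* a miss here also goes to the corruption classifier */
    }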
872 PMAP_ZINFO_PALLOC(pmap_t pmap, vm_size_t bytes)
878 pmap_ledger_credit(pmap, task_ledgers.tkm_private, bytes);
886 PMAP_ZINFO_PFREE(pmap_t pmap, vm_size_t bytes)
892 pmap_ledger_debit(pmap, task_ledgers.tkm_private, bytes);
900 PMAP_ZINFO_SALLOC(pmap_t pmap, vm_size_t bytes)
902 pmap_ledger_credit(pmap, task_ledgers.tkm_shared, bytes);
906 PMAP_ZINFO_SFREE(pmap_t pmap, vm_size_t bytes)
908 pmap_ledger_debit(pmap, task_ledgers.tkm_shared, bytes);
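These four inline wrappers keep the per-task accounting ledgers in step with pmap metadata allocations: per-pmap ("private") memory credits task_ledgers.tkm_private on allocation and debits it on free, and shared memory does the same against tkm_shared. Illustrative pairing at an alloc/free site:

    PMAP_ZINFO_PALLOC(pmap, PAGE_SIZE);     /* after allocating a per-pmap page */
    /* ... page in use ... */
    PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);      /* before freeing it back           */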
985 * The single pml4 page per pmap is allocated at pmap create time and exists
986 * for the duration of the pmap. We allocate this page in kernel VM.
991 pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
999 return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_cr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]);
1001 return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)];
1009 pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
1014 pml4 = pmap64_pml4(pmap, vaddr);
1026 pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
1031 pdpt = pmap64_pdpt(pmap, vaddr);
1053 * return the address of the mapped pte for virtual address 'vaddr' in the given pmap.
1059 pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
1064 assert(pmap);
1065 pde = pmap64_pde(pmap, vaddr);
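pmap64_pml4, pmap64_pdpt, pmap64_pde and pmap_pte chain together into the standard four-level x86-64 table walk: each level takes nine bits of the virtual address as an index into the table named by the previous level's entry. A condensed sketch of the index arithmetic, reusing the PML4SHIFT/NPML4PG names from line 999 and raw shift/mask values for the lower levels; the real code additionally checks present bits, handles 2MB superpages, and distinguishes EPT pmaps:

    pml4_entry_t *pml4e;
    pdpt_entry_t *pdpte;
    pd_entry_t   *pde;
    pt_entry_t   *pte;

    pml4e = &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG - 1)]; /* bits 47:39 */
    pdpte = &((pdpt_entry_t *)PHYSMAP_PTOV(*pml4e & PG_FRAME))
                [(vaddr >> 30) & 0x1ff];                          /* bits 38:30 */
    pde   = &((pd_entry_t *)PHYSMAP_PTOV(*pdpte & PG_FRAME))
                [(vaddr >> 21) & 0x1ff];                          /* bits 29:21 */
    pte   = &((pt_entry_t *)PHYSMAP_PTOV(*pde & PG_FRAME))
                [(vaddr >> 12) & 0x1ff];                          /* bits 20:12 */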