Lines Matching defs:pmap in /macosx-10.10/xnu-2782.1.97/osfmk/i386/

31 #include <vm/pmap.h>
37 pmap_t pmap,
43 pmap_t pmap,
50 pmap_t pmap,
70 * grand = the pmap that we will nest subord into
71 * subord = the pmap that goes into the grand
72 * va_start = start of the range in grand to be nested
73 * nstart = start of the corresponding range in the nested pmap (subord)
76 * Inserts a pmap into another. This is used to implement shared segments.
79 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
209 * grand = the pmap that we will un-nest subord from
210 * vaddr = start of range in pmap to be unnested
212 * Removes a pmap from another. This is used to implement shared segments.
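
A minimal sketch of what nesting amounts to, using hypothetical toy_* types rather than the XNU definitions: the grand pmap's top-level directory entries are made to alias the nested (subord) pmap's entries, so both address spaces walk the same leaf page tables; un-nesting clears the aliases.

    /* Toy model of pmap nesting; NPDES and the entry layout are
     * illustrative, not the x86_64 hardware format. */
    #include <stdint.h>
    #include <stddef.h>

    #define NPDES 512                 /* entries per toy page directory */

    typedef struct {
        uint64_t pde[NPDES];          /* each valid entry points at a page table */
    } toy_pmap_t;

    /* Nest: alias subord's directory entries into grand over [start, start+count). */
    static void toy_nest(toy_pmap_t *grand, const toy_pmap_t *subord,
                         size_t start, size_t count)
    {
        for (size_t i = 0; i < count; i++)
            grand->pde[start + i] = subord->pde[i];  /* share the leaf tables */
    }

    /* Un-nest: drop the aliases so grand no longer maps the shared segment. */
    static void toy_unnest(toy_pmap_t *grand, size_t start, size_t count)
    {
        for (size_t i = 0; i < count; i++)
            grand->pde[start + i] = 0;
    }

This is why the comment at line 79 insists on higher-level VM locking: entries are aliased wholesale, so concurrent pmap enters could otherwise be lost.
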
301 * given virtual address in a given pmap.
307 pmap_find_phys(pmap_t pmap, addr64_t va)
322 if (!pmap->ref_count)
325 pdep = pmap_pde(pmap, va);
333 ptp = pmap_pte(pmap, va);
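
Lines 307-333 show the shape of pmap_find_phys(): bail on a dying pmap, check the PDE, then read the PTE. A compile-able toy version of that walk, with simplified constants and a PDE that stores a page-aligned pointer (not the real hardware format):

    #include <stdint.h>

    #define VALID       0x1ULL
    #define FRAME       (~0xfffULL)    /* page-aligned frame/pointer bits */
    #define PAGE_SHIFT  12
    #define PDE_SHIFT   21
    #define IDXMASK     0x1ffULL

    typedef uint64_t pt_entry_t;

    typedef struct {
        int        ref_count;
        pt_entry_t pde[512];           /* toy: PDE holds a pointer to a PT page */
    } toy_pmap_t;

    /* Return the physical page number for va, or 0 if unmapped. */
    uint64_t toy_find_phys(toy_pmap_t *pmap, uint64_t va)
    {
        if (pmap->ref_count == 0)      /* dying pmap: nothing to find */
            return 0;
        pt_entry_t pde = pmap->pde[(va >> PDE_SHIFT) & IDXMASK];
        if (!(pde & VALID))
            return 0;                  /* no page table covers this va */
        pt_entry_t *pt = (pt_entry_t *)(uintptr_t)(pde & FRAME);
        pt_entry_t pte = pt[(va >> PAGE_SHIFT) & IDXMASK];
        if (!(pte & VALID))
            return 0;                  /* table exists but the page doesn't */
        return (pte & FRAME) >> PAGE_SHIFT;
    }
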
356 pmap_t pmap;
372 if (pv_h->pmap != PMAP_NULL) {
377 pmap = pv_e->pmap;
379 ptep = pmap_pte(pmap, vaddr);
382 panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap);
386 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
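
Lines 356-386 walk the page's PV list: every (pmap, vaddr) pair that maps the physical page gets its PTE's cache-attribute bits rewritten and its TLB entry flushed. A sketch of that loop; toy_pte and toy_tlb_flush are assumed helpers, and the bit positions are illustrative:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct toy_pmap toy_pmap_t;

    typedef struct pv_entry {
        toy_pmap_t      *pmap;
        uint64_t         vaddr;
        struct pv_entry *next;
    } pv_entry_t;

    extern uint64_t *toy_pte(toy_pmap_t *pmap, uint64_t vaddr);
    extern void      toy_tlb_flush(toy_pmap_t *pmap, uint64_t start, uint64_t end);

    #define CACHE_BITS 0x18ULL        /* illustrative PCD|PWT positions */
    #define PAGE_SIZE  4096

    void toy_update_cache_attrs(pv_entry_t *pv_head, uint64_t attrs)
    {
        for (pv_entry_t *pv = pv_head; pv != NULL; pv = pv->next) {
            uint64_t *ptep = toy_pte(pv->pmap, pv->vaddr);
            assert(ptep != NULL);     /* the source panics on a missing PTE */
            *ptep = (*ptep & ~CACHE_BITS) | attrs;
            toy_tlb_flush(pv->pmap, pv->vaddr, pv->vaddr + PAGE_SIZE);
        }
    }
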
421 register pmap_t pmap,
429 (void) pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE, NULL);
435 register pmap_t pmap,
466 if (pmap == PMAP_NULL)
478 pmap,
482 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
487 if (__improbable(set_NX && (pmap == kernel_pmap) && ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) || (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) {
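
Lines 482-487 decide whether the new mapping is made no-execute. Restated as a small standalone predicate (the flag values here are illustrative, except VM_PROT_EXECUTE, which is 0x4 in Mach):

    #include <stdbool.h>

    #define VM_PROT_EXECUTE 0x4
    #define VM_MEM_STACK    0x100     /* illustrative flag value */

    /* Globals modeled on the boot-args the excerpt tests. */
    static bool nx_enabled = true;
    static bool pmap_disable_kstack_nx = false;
    static bool pmap_disable_kheap_nx  = false;

    bool toy_should_set_nx(int prot, int flags, bool pmap_nx_enabled,
                           bool is_kernel_pmap)
    {
        /* Execute requested, or NX disabled globally or per-pmap: stay executable. */
        if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap_nx_enabled)
            return false;

        /* Kernel-only overrides: allow executable kernel stacks or kernel heap. */
        if (is_kernel_pmap &&
            ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) ||
             (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))
            return false;

        return true;                  /* otherwise mark the mapping no-execute */
    }
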
493 * zalloc may cause pageout (which will lock the pmap system).
503 PMAP_LOCK(pmap);
506 * Expand pmap to include this pte. Assume that
507 * pmap is always expanded to include enough hardware
511 while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) {
513 PMAP_UNLOCK(pmap);
514 kr_expand = pmap_expand_pdpt(pmap, vaddr, options);
517 PMAP_LOCK(pmap);
520 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
522 * Must unlock to expand the pmap
525 PMAP_UNLOCK(pmap);
526 kr_expand = pmap_expand(pmap, vaddr, options);
529 PMAP_LOCK(pmap);
533 PMAP_UNLOCK(pmap);
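
Lines 503-533 show the standard retry pattern: pmap_expand()/pmap_expand_pdpt() can block on allocation (the comment at line 493 notes zalloc may page out), so the pmap lock is dropped around the expansion and the PTE lookup is retried under the re-taken lock. A generic sketch of the pattern with toy_* stand-ins:

    #include <stddef.h>

    typedef struct toy_pmap toy_pmap_t;
    typedef unsigned long long vaddr_t;
    typedef int kern_return_t;
    #define KERN_SUCCESS 0

    extern void toy_lock(toy_pmap_t *);
    extern void toy_unlock(toy_pmap_t *);
    extern unsigned long long *toy_pte(toy_pmap_t *, vaddr_t);
    extern kern_return_t toy_expand(toy_pmap_t *, vaddr_t);

    kern_return_t toy_get_pte_expanding(toy_pmap_t *pmap, vaddr_t va,
                                        unsigned long long **ptep)
    {
        toy_lock(pmap);
        while ((*ptep = toy_pte(pmap, va)) == NULL) {
            toy_unlock(pmap);         /* must not allocate under the pmap lock */
            kern_return_t kr = toy_expand(pmap, va);
            if (kr != KERN_SUCCESS)
                return kr;
            toy_lock(pmap);           /* re-check: another thread may have raced */
        }
        /* caller proceeds with *ptep and unlocks when done */
        return KERN_SUCCESS;
    }
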
544 delpage_pde_index = pdeidx(pmap, vaddr);
545 delpage_pm_obj = pmap->pm_obj;
556 OSAddAtomic64(-1, &pmap->stats.compressed);
596 if (pmap != kernel_pmap)
608 OSAddAtomic(+1, &pmap->stats.wired_count);
609 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
613 assert(pmap->stats.wired_count >= 1);
614 OSAddAtomic(-1, &pmap->stats.wired_count);
615 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
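
Lines 608-615 account for a wiring transition: wiring bumps the per-pmap counter and credits the task ledger, unwiring asserts the counter is positive and reverses both. Modeled with plain arithmetic in place of OSAddAtomic and the ledger calls:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        int       wired_count;
        long long wired_mem_ledger;   /* stand-in for task_ledgers.wired_mem */
    } toy_stats_t;

    #define PAGE_SIZE 4096

    void toy_adjust_wired(toy_stats_t *s, bool was_wired, bool now_wired)
    {
        if (now_wired && !was_wired) {
            s->wired_count += 1;                /* OSAddAtomic(+1, ...) */
            s->wired_mem_ledger += PAGE_SIZE;   /* pmap_ledger_credit(...) */
        } else if (!now_wired && was_wired) {
            assert(s->wired_count >= 1);
            s->wired_count -= 1;                /* OSAddAtomic(-1, ...) */
            s->wired_mem_ledger -= PAGE_SIZE;   /* pmap_ledger_debit(...) */
        }
    }
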
671 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
680 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
681 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
682 assert(pmap->stats.resident_count >= 1);
683 OSAddAtomic(-1, &pmap->stats.resident_count);
684 if (pmap != kernel_pmap) {
686 assert(pmap->stats.reusable > 0);
687 OSAddAtomic(-1, &pmap->stats.reusable);
689 assert(pmap->stats.internal > 0);
690 OSAddAtomic(-1, &pmap->stats.internal);
692 assert(pmap->stats.external > 0);
693 OSAddAtomic(-1, &pmap->stats.external);
697 assert(pmap->stats.wired_count >= 1);
698 OSAddAtomic(-1, &pmap->stats.wired_count);
699 pmap_ledger_debit(pmap, task_ledgers.wired_mem,
710 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte);
719 if (pmap != kernel_pmap) {
721 assert(pmap->stats.device > 0);
722 OSAddAtomic(-1, &pmap->stats.device);
726 assert(pmap->stats.wired_count >= 1);
727 OSAddAtomic(-1, &pmap->stats.wired_count);
728 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
752 if (pv_h->pmap == PMAP_NULL) {
757 pv_h->pmap = pmap;
782 * the kernel pmap we'll use one of
789 if (kernel_pmap == pmap) {
793 PMAP_UNLOCK(pmap);
794 pmap_pv_throttle(pmap);
805 pvh_e->pmap = pmap;
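
Lines 752-805 insert the new mapping into the page's PV list: the pv_head itself records the first mapping (pv_h->pmap == PMAP_NULL means no mappings yet), and later mappings get a chained pv_entry, with the kernel pmap drawing from a reserve and user pmaps possibly throttled (line 794). A simplified sketch using malloc in place of the kernel's zone allocator:

    #include <stdlib.h>

    typedef struct toy_pmap toy_pmap_t;

    typedef struct pv_entry {
        toy_pmap_t        *pmap;      /* NULL in the head means "no mappings yet" */
        unsigned long long vaddr;
        struct pv_entry   *next;
    } pv_entry_t;

    int toy_pv_insert(pv_entry_t *pv_h, toy_pmap_t *pmap, unsigned long long vaddr)
    {
        if (pv_h->pmap == NULL) {     /* first mapping: use the header in place */
            pv_h->pmap = pmap;
            pv_h->vaddr = vaddr;
            return 0;
        }
        pv_entry_t *pvh_e = malloc(sizeof(*pvh_e));  /* the kernel uses a zone */
        if (pvh_e == NULL)
            return -1;
        pvh_e->pmap = pmap;
        pvh_e->vaddr = vaddr;
        pvh_e->next = pv_h->next;     /* link behind the header */
        pv_h->next = pvh_e;
        return 0;
    }
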
819 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
820 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
821 OSAddAtomic(+1, &pmap->stats.resident_count);
822 if (pmap->stats.resident_count > pmap->stats.resident_max) {
823 pmap->stats.resident_max = pmap->stats.resident_count;
825 if (pmap != kernel_pmap) {
827 OSAddAtomic(+1, &pmap->stats.reusable);
828 PMAP_STATS_PEAK(pmap->stats.reusable);
830 OSAddAtomic(+1, &pmap->stats.internal);
831 PMAP_STATS_PEAK(pmap->stats.internal);
833 OSAddAtomic(+1, &pmap->stats.external);
834 PMAP_STATS_PEAK(pmap->stats.external);
841 pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
842 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
843 OSAddAtomic(+1, &pmap->stats.resident_count);
844 if (pmap != kernel_pmap) {
846 OSAddAtomic(+1, &pmap->stats.device);
847 PMAP_STATS_PEAK(pmap->stats.device);
870 if (pmap != kernel_pmap)
878 OSAddAtomic(+1, &pmap->stats.wired_count);
879 pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
896 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
898 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
906 PMAP_UNLOCK(pmap);
918 PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
931 * The pmap must be locked.
932 * If the pmap is not the kernel pmap, the range must lie
939 pmap_t pmap,
944 pmap_remove_range_options(pmap, start_vaddr, spte, epte, 0);
949 pmap_t pmap,
984 if (pmap != kernel_pmap &&
1026 PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
1065 pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte);
1091 if (pmap->stats.resident_count < num_removed)
1094 pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed));
1095 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(num_removed));
1096 assert(pmap->stats.resident_count >= num_removed);
1097 OSAddAtomic(-num_removed, &pmap->stats.resident_count);
1099 if (pmap != kernel_pmap) {
1101 assert(pmap->stats.device >= num_device);
1103 OSAddAtomic(-num_device, &pmap->stats.device);
1105 assert(pmap->stats.external >= num_external);
1107 OSAddAtomic(-num_external, &pmap->stats.external);
1108 assert(pmap->stats.internal >= num_internal);
1110 OSAddAtomic(-num_internal, &pmap->stats.internal);
1111 assert(pmap->stats.reusable >= num_reusable);
1113 OSAddAtomic(-num_reusable, &pmap->stats.reusable);
1114 assert(pmap->stats.compressed >= num_compressed);
1116 OSAddAtomic64(-num_compressed, &pmap->stats.compressed);
1120 if (pmap->stats.wired_count < num_unwired)
1123 assert(pmap->stats.wired_count >= num_unwired);
1124 OSAddAtomic(-num_unwired, &pmap->stats.wired_count);
1125 pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired));
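
Lines 1091-1125 apply the accounting for a whole removed range at once: counts accumulated during PTE teardown are subtracted with one update per statistic rather than one per page, after sanity checks. The same shape, with plain integers standing in for the atomic counters and ledgers:

    #include <assert.h>

    typedef struct {
        int       resident_count, device, external, internal, reusable, wired_count;
        long long compressed;
    } toy_stats_t;

    void toy_remove_accounting(toy_stats_t *s, int removed, int device,
                               int external, int internal, int reusable,
                               long long compressed, int unwired)
    {
        assert(s->resident_count >= removed);
        s->resident_count -= removed;          /* OSAddAtomic(-num_removed, ...) */
        assert(s->device >= device);         s->device      -= device;
        assert(s->external >= external);     s->external    -= external;
        assert(s->internal >= internal);     s->internal    -= internal;
        assert(s->reusable >= reusable);     s->reusable    -= reusable;
        assert(s->compressed >= compressed); s->compressed  -= compressed;
        assert(s->wired_count >= unwired);   s->wired_count -= unwired;
        /* the source also debits phys_mem, phys_footprint and wired_mem ledgers */
    }
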
1285 pmap_t pmap;
1328 if (pv_h->pmap == PMAP_NULL)
1337 pmap = pv_e->pmap;
1339 pte = pmap_pte(pmap, vaddr);
1342 "pmap_page_protect: PTE mismatch, pn: 0x%x, pmap: %p, vaddr: 0x%llx, pte: 0x%llx", pn, pmap, vaddr, *pte);
1346 "pmap=%p pn=0x%x vaddr=0x%llx\n",
1347 pmap, pn, vaddr);
1356 /* Remove per-pmap wired count */
1358 OSAddAtomic(-1, &pmap->stats.wired_count);
1359 pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE);
1362 if (pmap != kernel_pmap &&
1366 OSAddAtomic64(+1, &pmap->stats.compressed);
1367 PMAP_STATS_PEAK(pmap->stats.compressed);
1368 pmap->stats.compressed_lifetime++;
1379 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
1381 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
1388 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1394 if (pmap->stats.resident_count < 1)
1397 pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
1398 assert(pmap->stats.resident_count >= 1);
1399 OSAddAtomic(-1, &pmap->stats.resident_count);
1405 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
1407 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
1410 if (pmap != kernel_pmap) {
1412 assert(pmap->stats.reusable > 0);
1413 OSAddAtomic(-1, &pmap->stats.reusable);
1415 assert(pmap->stats.internal > 0);
1416 OSAddAtomic(-1, &pmap->stats.internal);
1418 assert(pmap->stats.external > 0);
1419 OSAddAtomic(-1, &pmap->stats.external);
1431 pv_h->pmap = PMAP_NULL;
1453 PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg);
1455 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE);
1464 if (pv_h->pmap == PMAP_NULL) {
1469 pv_h->pmap = pvh_e->pmap;
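
Lines 1285-1469 are pmap_page_protect(): walk the page's PV list and, depending on the requested protection, either zap each mapping (with the stats and ledger debits shown above) or merely strip write permission, flushing the TLB either way; after a removal, a chained entry is promoted into the pv_head (lines 1464-1469). The control flow reduced to a toy walk; toy_pte/toy_tlb_flush are assumed helpers, and PV unlinking and accounting are elided:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    typedef struct toy_pmap toy_pmap_t;
    typedef struct pv_entry {
        toy_pmap_t      *pmap;
        uint64_t         vaddr;
        struct pv_entry *next;
    } pv_entry_t;

    #define PTE_WRITE 0x2ULL
    #define PAGE_SIZE 4096

    extern uint64_t *toy_pte(toy_pmap_t *, uint64_t);
    extern void      toy_tlb_flush(toy_pmap_t *, uint64_t, uint64_t);

    void toy_page_protect(pv_entry_t *pv_h, bool remove)
    {
        for (pv_entry_t *pv = pv_h; pv != NULL && pv->pmap != NULL; pv = pv->next) {
            uint64_t *ptep = toy_pte(pv->pmap, pv->vaddr);
            if (ptep == NULL)
                continue;             /* the source panics on a missing PTE */
            if (remove)
                *ptep = 0;            /* drop the mapping entirely */
            else
                *ptep &= ~PTE_WRITE;  /* downgrade to read-only */
            toy_tlb_flush(pv->pmap, pv->vaddr, pv->vaddr + PAGE_SIZE);
        }
        /* removal also requires stats/ledger debits and PV-head fixup, elided here */
    }
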
1504 pmap_t pmap;
1540 * the per-pmap lock
1542 if (pv_h->pmap != PMAP_NULL) {
1556 pmap = pv_e->pmap;
1561 pte = pmap_pte(pmap, va);
1585 PMAP_UPDATE_TLBS(pmap,
1589 /* delayed TLB flush: add "pmap" info */
1591 pmap,
1600 /* update pmap "reusable" stats */
1603 pmap != kernel_pmap) {
1605 assert(pmap->stats.reusable > 0);
1606 OSAddAtomic(-1, &pmap->stats.reusable);
1609 OSAddAtomic(+1, &pmap->stats.internal);
1610 PMAP_STATS_PEAK(pmap->stats.internal);
1613 OSAddAtomic(+1, &pmap->stats.external);
1614 PMAP_STATS_PEAK(pmap->stats.external);
1618 pmap != kernel_pmap) {
1620 OSAddAtomic(+1, &pmap->stats.reusable);
1621 PMAP_STATS_PEAK(pmap->stats.reusable);
1624 assert(pmap->stats.internal > 0);
1625 OSAddAtomic(-1, &pmap->stats.internal);
1628 assert(pmap->stats.external > 0);
1629 OSAddAtomic(-1, &pmap->stats.external);
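
Lines 1600-1629 flip a page between the "reusable" bucket and the internal/external buckets, keeping the per-pmap totals consistent in both directions. The same logic as a standalone helper:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct { int reusable, internal, external; } toy_stats_t;

    void toy_set_reusable(toy_stats_t *s, bool make_reusable, bool is_internal)
    {
        if (make_reusable) {          /* internal/external -> reusable */
            s->reusable += 1;
            if (is_internal) { assert(s->internal > 0); s->internal -= 1; }
            else             { assert(s->external > 0); s->external -= 1; }
        } else {                      /* reusable -> internal/external */
            assert(s->reusable > 0);
            s->reusable -= 1;
            if (is_internal) s->internal += 1;
            else             s->external += 1;
        }
    }
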
1669 pmap_t pmap;
1708 pv_h->pmap != PMAP_NULL) {
1716 pmap = pv_e->pmap;
1722 pte = pmap_pte(pmap, va);
1741 * The mapping must already exist in the pmap.
1834 pmap_t pmap,
1846 if (pmap == PMAP_NULL || pmap == kernel_pmap || s64 == e64)
1850 pmap,
1856 PMAP_LOCK(pmap);
1864 pde = pmap_pde(pmap, s64);
1870 spte = pmap_pte(pmap,
1886 PMAP_UNLOCK(pmap);
1887 PMAP_LOCK(pmap);
1892 PMAP_UNLOCK(pmap);
1895 pmap, 0, 0, 0, 0);
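
Lines 1834-1895 are the range-protect path: the kernel pmap, a null pmap, and empty ranges are skipped (line 1846), the range is walked under the pmap lock with periodic unlock/relock for preemptibility (lines 1886-1887), and the TLB is flushed. A reduced sketch that clears the writable bit, again with toy_* stand-ins:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct toy_pmap toy_pmap_t;
    #define PTE_VALID 0x1ULL
    #define PTE_WRITE 0x2ULL
    #define PAGE_SIZE 4096ULL

    extern void      toy_lock(toy_pmap_t *);
    extern void      toy_unlock(toy_pmap_t *);
    extern uint64_t *toy_pte(toy_pmap_t *, uint64_t);
    extern void      toy_tlb_flush(toy_pmap_t *, uint64_t, uint64_t);

    void toy_protect_readonly(toy_pmap_t *pmap, uint64_t start, uint64_t end)
    {
        if (pmap == NULL || start == end)
            return;                   /* the source also skips kernel_pmap */
        toy_lock(pmap);
        for (uint64_t va = start; va < end; va += PAGE_SIZE) {
            uint64_t *ptep = toy_pte(pmap, va);
            if (ptep != NULL && (*ptep & PTE_VALID))
                *ptep &= ~PTE_WRITE;
            /* the source periodically unlocks/relocks to stay preemptible */
        }
        toy_tlb_flush(pmap, start, end);
        toy_unlock(pmap);
    }
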
1903 __unused pmap_t pmap,