Lines Matching refs:pmap (searched only in /macosx-10.5.8/xnu-1228.15.4/osfmk/i386/)

60  *	File:	pmap.c
109 #include <vm/pmap.h>
149 /* #define DEBUGINTERRUPTS 1 uncomment to ensure pmap callers have interrupts enabled */
151 #define pmap_intr_assert() {if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) panic("pmap interrupt assert %s, %d",__FILE__, __LINE__);}
186 pmap_t pmap,
274 pmap_t pmap;
285 pmap we cannot block and still panic, so, we keep a separate hot pool for use only on
296 if (this pv is for this pmap/vaddr) {
316 hash list via a hash of [pmap, vaddr]. These have been designed with the two goals of
331 pmap_t pmap;
338 pmap_t pmap;
354 pmap_t pmap; /* pmap where mapping lies */
364 pmap_t pmap;
385 * with the pmap system locked (at SPLVM, not in the cpus_active set).
465 * for. Initialized to zero so that pmap operations done before
484 #define pvhashidx(pmap, va) (((uint32_t)pmap ^ ((uint32_t)((uint64_t)va >> PAGE_SHIFT) & 0xFFFFFFFF)) & npvhash)
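The matched pvhashidx() macro above derives a PV hash bucket by XORing the pmap pointer with the virtual page number and masking with npvhash. A minimal standalone sketch of that bucket computation follows; PAGE_SHIFT = 12 and the npvhash value are illustrative assumptions, not values taken from this file.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12              /* assumed 4 KB pages */
static uint32_t npvhash = 4095;     /* assumed bucket-count-minus-one mask */

/* Sketch of the pvhashidx() computation: pmap pointer XOR virtual page
 * number, masked to the hash table size. */
static uint32_t pvhash_bucket(void *pmap, uint64_t va)
{
    uint32_t vpn = (uint32_t)(va >> PAGE_SHIFT);      /* virtual page number */
    return ((uint32_t)(uintptr_t)pmap ^ vpn) & npvhash;
}

int main(void)
{
    int dummy_pmap;                 /* any address stands in for a pmap_t here */
    printf("bucket = %u\n", pvhash_bucket(&dummy_pmap, 0x00007fff5fbff000ULL));
    return 0;
}
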
518 * There are two structures in the pmap module that need locking:
521 * for the list in question.) Most routines want to lock a pmap and
527 * The system wide pmap lock has been removed. Now, paths take a lock
528 * on the pmap before changing its 'shape' and the reverse order lockers
532 * lets the pmap layer run (nearly completely) interrupt enabled, unlike
537 * pmap locking
540 #define PMAP_LOCK(pmap) { \
541 simple_lock(&(pmap)->lock); \
544 #define PMAP_UNLOCK(pmap) { \
545 simple_unlock(&(pmap)->lock); \
576 #define LOOP_CHECK(msg, pmap) \
579 kprintf("%s: cpu %d pmap %x\n", \
580 msg, cpu_number(), pmap); \
587 #define LOOP_CHECK(msg, pmap)
591 static void pmap_flush_tlbs(pmap_t pmap);
593 #define PMAP_UPDATE_TLBS(pmap, s, e) \
594 pmap_flush_tlbs(pmap)
607 struct pmap kernel_pmap_store;
613 struct zone *pmap_zone; /* zone of pmap structures */
623 * Pmap cache. Cache is threaded through ref_count field of pmap.
658 pvhash_idx = pvhashidx(pvh->pmap, pvh->va);
699 * the single pml4 page per pmap is allocated at pmap create time and exists
700 * for the duration of the pmap. we allocate this page in kernel vm (to save us one
706 pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr)
708 return ((pml4_entry_t *)pmap->pm_hold + ((vm_offset_t)((vaddr>>PML4SHIFT)&(NPML4PG-1))));
716 pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr)
722 assert(pmap);
728 pml4 = pmap64_pml4(pmap, vaddr);
761 pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr)
767 assert(pmap);
774 pdpt = pmap64_pdpt(pmap, vaddr);
813 * return address of mapped pte for vaddr va in pmap pmap.
815 * if targeted pmap is not the kernel pmap
825 pmap_pte(pmap_t pmap, vm_map_offset_t vaddr)
831 assert(pmap);
832 pde = pmap_pde(pmap,vaddr);
835 if (pmap == kernel_pmap)
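The matched accessors pmap64_pml4(), pmap64_pdpt(), pmap64_pde(), and pmap_pte() each select one level's entry by shifting the virtual address and masking with the per-level entry count, as in (vaddr >> PML4SHIFT) & (NPML4PG-1). The sketch below illustrates that index arithmetic using the standard x86-64 shift values; the exact constants are assumptions here, since this listing does not show xnu's own definitions.

#include <stdint.h>
#include <stdio.h>

#define NPTES_PER_LEVEL 512ULL      /* 9 index bits per level (assumed, standard x86-64) */

/* Extract one level's 9-bit table index from a virtual address. */
static unsigned level_index(uint64_t vaddr, unsigned shift)
{
    return (unsigned)((vaddr >> shift) & (NPTES_PER_LEVEL - 1));
}

int main(void)
{
    uint64_t va = 0x00007f1234567000ULL;
    printf("pml4 %u  pdpt %u  pde %u  pte %u\n",
           level_index(va, 39),     /* PML4 index: bits 47..39 */
           level_index(va, 30),     /* PDPT index: bits 38..30 */
           level_index(va, 21),     /* PDE  index: bits 29..21 */
           level_index(va, 12));    /* PTE  index: bits 20..12 */
    return 0;
}
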
1146 * The kernel's pmap is statically allocated so we don't
1340 kprintf("Kernel traces for pmap operations enabled\n");
1355 * Initialize the pmap module.
1356 * Called by vm_init, to initialize any structures that the pmap
1437 s = (vm_size_t) sizeof(struct pmap);
1438 pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */
1459 pv_e->pmap = kernel_pmap;
1466 * Initialize pmap cache.
1514 result = (pv_h->pmap == PMAP_NULL);
1520 pmap_t pmap,
1527 if (pmap == PMAP_NULL) {
1533 phys_page = pmap_find_phys(pmap, offset);
1535 if (pmap != kernel_pmap &&
1536 pmap->pm_task_map == TASK_MAP_32BIT &&
1551 pmap, vstart, vend, phys_page, offset);
1702 * mapping in the bottom 4G of the user's pmap. The task mapping changes
1718 * cpu_kernel_cr3 is the cr3 for the kernel's pmap.
1726 * while the system pmap lock is held. It is used by
1734 * system-wide pmap lock is held) this not serviced by
1832 * If some cpu is not using the physical pmap pointer that it
1834 * pmap that is being destroyed! Make sure we are
1835 * physically on the right pmap:
1852 * pmap structure.
1892 * Add a reference to the specified pmap.
1913 * The pmap must be locked.
1914 * If the pmap is not the kernel pmap, the range must lie
1921 pmap_t pmap,
1942 if (pmap != kernel_pmap &&
1943 pmap->pm_task_map == TASK_MAP_32BIT &&
1989 PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr);
2032 if (pv_h->pmap == PMAP_NULL)
2035 if (pv_h->va == vaddr && pv_h->pmap == pmap) { /* rooted or not */
2046 pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
2057 pv_h->pmap = pvh_e->pmap;
2060 pv_h->pmap = PMAP_NULL;
2068 pvhash_idx = pvhashidx(pmap,vaddr);
2079 if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == ppn) break;
2117 if (pmap->stats.resident_count < num_removed)
2120 assert(pmap->stats.resident_count >= num_removed);
2121 OSAddAtomic(-num_removed, (SInt32 *) &pmap->stats.resident_count);
2124 if (pmap->stats.wired_count < num_unwired)
2127 assert(pmap->stats.wired_count >= num_unwired);
2128 OSAddAtomic(-num_unwired, (SInt32 *) &pmap->stats.wired_count);
2270 register pmap_t pmap;
2313 if (pv_h->pmap != PMAP_NULL) {
2320 pmap = pv_e->pmap;
2323 pte = pmap_pte(pmap, vaddr);
2326 kprintf("pmap_page_protect pmap %p pn 0x%x vaddr 0x%llx\n",pmap, pn, vaddr);
2336 if (remove || pmap == kernel_pmap) {
2342 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
2349 if (pmap->stats.resident_count < 1)
2352 assert(pmap->stats.resident_count >= 1);
2353 OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
2363 pv_h->pmap = PMAP_NULL;
2370 pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
2388 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
2398 if (pv_h->pmap == PMAP_NULL) {
2403 pvhash_idx = pvhashidx(pvh_e->pmap,pvh_e->va);
2408 pv_h->pmap = pvh_e->pmap;
2533 pmap_t pmap,
2544 pmap_enter(pmap, va, pa, prot, attr, TRUE);
2565 register pmap_t pmap,
2591 printf("pmap(%qx, %x)\n", vaddr, pn);
2592 if (pmap == PMAP_NULL)
2598 (int) pmap,
2602 if ( (prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled )
2609 * zalloc may cause pageout (which will lock the pmap system).
2619 PMAP_LOCK(pmap);
2622 * Expand pmap to include this pte. Assume that
2623 * pmap is always expanded to include enough hardware
2627 while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) {
2629 * Must unlock to expand the pmap.
2631 PMAP_UNLOCK(pmap);
2632 pmap_expand(pmap, vaddr); /* going to grow pde level page(s) */
2633 PMAP_LOCK(pmap);
2675 if (pmap != kernel_pmap)
2686 OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
2690 assert(pmap->stats.wired_count >= 1);
2691 OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
2729 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
2738 if (pmap->stats.resident_count < 1)
2741 assert(pmap->stats.resident_count >= 1);
2742 OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count);
2747 if (pmap->stats.wired_count < 1)
2750 assert(pmap->stats.wired_count >= 1);
2751 OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
2765 if (pv_h->pmap == PMAP_NULL) {
2769 if (pv_h->va == vaddr && pv_h->pmap == pmap) {
2778 pvhash_idx = pvhashidx(pvh_e->pmap, pvh_e->va);
2783 pv_h->pmap = pvh_e->pmap;
2787 pv_h->pmap = PMAP_NULL;
2797 pvhash_idx = pvhashidx(pmap,vaddr);
2801 if (NULL==pprevh)panic("pmap enter 1");
2808 if (pvh_e->pmap == pmap && pvh_e->va == vaddr && pvh_e->ppn == old_ppn) break;
2815 if(NULL==pprevh)panic("pmap enter 2");
2830 assert(pmap->stats.wired_count >= 1);
2831 OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count);
2856 if (pv_h->pmap == PMAP_NULL) {
2861 pv_h->pmap = pmap;
2875 * if we are on the kernel pmap we'll use one of the special private
2879 if (kernel_pmap == pmap) {
2883 PMAP_UNLOCK(pmap);
2892 pvh_e->pmap = pmap;
2895 pvhash_idx = pvhashidx(pmap,vaddr);
2916 OSAddAtomic(+1, (SInt32 *) &pmap->stats.resident_count);
2917 if (pmap->stats.resident_count > pmap->stats.resident_max) {
2918 pmap->stats.resident_max = pmap->stats.resident_count;
2936 if (pmap != kernel_pmap)
2946 OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count);
2960 PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE);
2970 PMAP_UNLOCK(pmap);
2979 * The mapping must already exist in the pmap.
2990 * We must grab the pmap system lock because we may
3018 pmap_find_phys(pmap_t pmap, addr64_t va)
3025 ptp = pmap_pte(pmap, va);
3048 register pmap_t pmap,
3055 ppn = pmap_find_phys(pmap, vaddr);
3089 * put the page into the pmap's obj list so it
3127 panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
3179 * put the page into the pmap's obj list so it
3217 panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n",
3245 * Expands a pmap to be able to map the specified virtual address.
3248 * pmap, then re-maps the physical pages that were in the old
3249 * pmap to be in the new pmap.
3251 * Must be called with the pmap system and the pmap unlocked,
3291 * put the page into the pmap's obj list so it
3330 panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n",
3507 * Make the specified pages (by pmap, offset)
3520 __unused pmap_t pmap,
3526 pmap++; start_addr++; end_addr++; pageable++;
3542 register pmap_t pmap;
3568 * the entire pmap system locked.
3570 if (pv_h->pmap != PMAP_NULL) {
3578 pmap = pv_e->pmap;
3589 pte = pmap_pte(pmap, va);
3597 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
3625 register pmap_t pmap;
3662 * the entire pmap system locked.
3664 if (pv_h->pmap != PMAP_NULL) {
3671 pmap = pv_e->pmap;
3679 * using this pmap, flushes its TLB state
3681 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
3687 pte = pmap_pte(pmap, va);
3905 if (pv_h->pmap || pv_h->next)
3910 if (pv_h->pmap)
3911 printf("%llx in pmap %p\n", pv_h->va, pv_h->pmap);
3987 __unused pmap_t pmap,
4004 ptep = pmap_pte(map->pmap, va);
4185 * The per-cpu pmap data structure itself.
4282 * grand = the pmap that we will nest subord into
4283 * subord = the pmap that goes into the grand
4284 * vstart = start of range in pmap to be inserted
4285 * nstart = start of range in pmap nested pmap
4288 * Inserts a pmap into another. This is used to implement shared segments.
4293 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
4379 * grand = the pmap that we will nest subord into
4380 * vaddr = start of range in pmap to be unnested
4382 * Removes a pmap from another. This is used to implement shared segments.
4411 // invalidate all pdes for segment at vaddr in pmap grand
4448 * the specified pmap
4450 void pmap_disable_NX(pmap_t pmap) {
4452 pmap->nx_enabled = 0;
4512 * Called with pmap locked, we:
4517 * - flush the local tlb is active for this pmap
4518 * - return ... the caller will unlock the pmap
4521 pmap_flush_tlbs(pmap_t pmap)
4527 pmap_paddr_t pmap_cr3 = pmap->pm_cr3;
4538 * Note: for the kernel pmap we look for 64-bit shared address maps.
4546 (pmap->pm_shared) ||
4547 ((pmap == kernel_pmap) &&
4565 (int) pmap, cpus_to_signal, flush_self, 0, 0);
4581 "cpu(s) failing to respond to interrupts, pmap=%p cpus_to_respond=0x%lx",
4582 pmap, cpus_to_respond);
4602 * We need this flush even if the pmap being changed
4610 (int) pmap, cpus_to_signal, flush_self, 0, 0);
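The pmap_flush_tlbs() matches above show the shootdown pattern: build a cpus_to_signal mask, interrupt those cpus, flush the local TLB if this cpu has the pmap active, then spin until every signalled cpu responds, panicking if one never does. A schematic, self-contained sketch of that pattern follows; the cpu_mask type, the immediate-acknowledge helper, and the spin deadline are hypothetical stand-ins, not xnu's actual per-cpu structures or IPI machinery.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t cpu_mask;          /* one bit per cpu; hypothetical stand-in */

#define NCPUS     4
#define MY_CPU    0
#define DEADLINE  1000000L          /* spin budget before giving up ("panic") */

/* Stand-in for a remote cpu's interrupt handler acknowledging the flush. */
static void fake_ipi_and_ack(int cpu, volatile cpu_mask *cpus_to_respond)
{
    *cpus_to_respond &= ~(1u << cpu);   /* pretend the cpu flushed its TLB */
}

int main(void)
{
    cpu_mask cpus_using_pmap = 0xF;     /* assumed: every cpu has this pmap active */
    cpu_mask cpus_to_signal  = cpus_using_pmap & ~(1u << MY_CPU);
    int      flush_self      = (cpus_using_pmap >> MY_CPU) & 1;
    volatile cpu_mask cpus_to_respond = cpus_to_signal;

    /* "Signal" each remote cpu; in this sketch the ack happens immediately. */
    for (int cpu = 0; cpu < NCPUS; cpu++)
        if (cpus_to_signal & (1u << cpu))
            fake_ipi_and_ack(cpu, &cpus_to_respond);

    if (flush_self)
        printf("flush local TLB on cpu %d\n", MY_CPU);

    /* Wait for every signalled cpu to acknowledge, with a deadline. */
    for (long spins = 0; cpus_to_respond != 0; spins++) {
        if (spins > DEADLINE) {
            printf("panic: cpus failing to respond, cpus_to_respond=0x%x\n",
                   (unsigned)cpus_to_respond);
            return 1;
        }
    }

    printf("all signalled cpus responded; caller unlocks the pmap\n");
    return 0;
}
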
4651 kprintf("pmap 0x%x\n",p);
4664 pmap_dump(current_cpu_datap()->cpu_active_thread->task->map->pmap);
4732 dump_4GB_pdpt(tp->map->pmap);