/macosx-10.5.8/xnu-1228.15.4/osfmk/ppc/pmap.c

Lines Matching refs:pmap

64  * 	Utah $Hdr: pmap.c 1.28 92/06/23$
111 #include <vm/pmap.h>
115 #include <ppc/pmap.h>
139 pmapTransTab *pmapTrans; /* Point to the hash to pmap translations */
145 void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
146 void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
152 extern struct pmap kernel_pmap_store;
153 pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
154 addr64_t kernel_pmap_phys; /* Pointer to kernel pmap and anchor for in-use pmaps, physical address */
155 pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
156 pmap_t sharedPmap; /* Pointer to common pmap for 64-bit address spaces */
157 struct zone *pmap_zone; /* zone of pmap structures */
173 * Free pmap list: caches the first free_pmap_max pmaps that are freed up
362 * Initialize kernel pmap
472 ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096) + /* Size of pmap translate table */
507 pmapTrans = (pmapTransTab *)addr; /* Point to the pmap to hash translation table */
509 pmapTrans[PPC_SID_KERNEL].pmapPAddr = (addr64_t)((uintptr_t)kernel_pmap); /* Initialize the kernel pmap in the translate table */
510 pmapTrans[PPC_SID_KERNEL].pmapVAddr = CAST_DOWN(unsigned int, kernel_pmap); /* Initialize the kernel pmap in the translate table */
512 addr += ((((1 << maxAdrSpb) * sizeof(pmapTransTab)) + 4095) & -4096); /* Point past pmap translate table */
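
The expression ((x + 4095) & -4096), used twice in the sizing arithmetic above, rounds a byte count up to the next 4 KB page boundary. A minimal standalone sketch of the idiom (the test values are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Round a size up to a 4 KB boundary: -4096 as a 32-bit value is
       0xFFFFF000, so the AND clears the low 12 bits after the bump. */
    static uint32_t round_to_page(uint32_t x) {
        return (x + 4095) & (uint32_t)-4096;
    }

    int main(void) {
        printf("%u %u %u\n", round_to_page(1), round_to_page(4096), round_to_page(4097));
        /* prints: 4096 4096 8192 */
        return 0;
    }
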
573 * finishes the initialization of the pmap module.
575 * to initialize any remaining data structures that the pmap module
578 * Note that the pmap needs to be sized and aligned to
587 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
654 * A pmap is either in the free list or in the in-use list. The only use
656 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
667 pmap_t pmap, ckpmap, fore;
673 * A software use-only map doesn't even need a pmap structure.
679 * If there is a pmap in the pmap free list, reuse it.
687 pmap = free_pmap_list; /* Yes, allocate it */
688 free_pmap_list = (pmap_t)pmap->freepmap; /* Dequeue this one (we chain free ones through freepmap) */
695 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
696 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
698 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
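
The allocation path above tries the free list first and falls back to the zone allocator only when the list is empty; free pmaps are chained through the freepmap field of the structure itself, so the cache costs no extra storage. A minimal sketch of the pattern with stand-in types (toy_pmap here is not the xnu structure):

    #include <stdlib.h>
    #include <string.h>

    struct toy_pmap {
        struct toy_pmap *freepmap;       /* overlays live fields while on the free list */
        /* ... live pmap fields would follow ... */
    };

    static struct toy_pmap *free_pmap_list = NULL;

    static struct toy_pmap *toy_pmap_alloc(size_t pmap_size) {
        struct toy_pmap *p = free_pmap_list;
        if (p != NULL) {                 /* any free ones? reuse the first */
            free_pmap_list = p->freepmap;
        } else {                         /* no: get a fresh one */
            p = malloc(pmap_size);       /* zalloc(pmap_zone) in the real code */
            if (p == NULL) return NULL;  /* out-of-memory condition */
            memset(p, 0, pmap_size);     /* bzero: clean up the new pmap */
        }
        return p;
    }

    static void toy_pmap_free(struct toy_pmap *p) {
        p->freepmap = free_pmap_list;    /* queue in front */
        free_pmap_list = p;
    }

    int main(void) {
        struct toy_pmap *p = toy_pmap_alloc(sizeof *p);
        toy_pmap_free(p);
        return (toy_pmap_alloc(sizeof *p) == p) ? 0 : 1;  /* recycled, not reallocated */
    }
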
710 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
719 pmap->space = (currSID * incrVSID) & (maxAdrSp - 1); /* Calculate the actual VSID */
720 pmap->spaceNum = currSID; /* Set the space ID number */
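
The space ID above comes from a stride-and-mask scheme: successive allocations step through the VSID space by incrVSID and wrap modulo the (power-of-two) number of address spaces. A sketch with invented stride and space-count values:

    #include <stdio.h>

    int main(void) {
        unsigned int incrVSID = 1027;          /* hypothetical stride */
        unsigned int maxAdrSp = 1u << 16;      /* hypothetical space count, a power of two */
        for (unsigned int currSID = 1; currSID <= 4; currSID++) {
            unsigned int space = (currSID * incrVSID) & (maxAdrSp - 1);
            printf("SID %u -> space 0x%04X\n", currSID, space);
        }
        return 0;
    }
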
726 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
727 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
728 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
729 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
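
Those four stores splice the new pmap into the circular in-use list between fore and ckpmap; pmap_destroy later undoes the splice with the mirror-image pair of stores (lines 811-812 below). A minimal sketch with a stand-in node type:

    struct node { struct node *next, *prev; };

    /* Insert n between fore and ckpmap, where fore->next was ckpmap. */
    static void splice_in(struct node *n, struct node *fore, struct node *ckpmap) {
        n->next = ckpmap;            /* my next points to the current */
        fore->next = n;              /* previous's next points to me */
        n->prev = fore;              /* my prev points to the previous */
        ckpmap->prev = n;            /* current's prev points back to me */
    }

    /* Removal, as in pmap_destroy: link the neighbors around the node. */
    static void splice_out(struct node *n) {
        n->prev->next = n->next;     /* my previous's next is my next */
        n->next->prev = n->prev;     /* my next's previous is my previous */
    }

    int main(void) {
        struct node a, b, c;
        a.next = a.prev = &b;        /* two-node circular list: a <-> b */
        b.next = b.prev = &a;
        splice_in(&c, &a, &b);       /* now a -> c -> b -> a */
        splice_out(&c);              /* back to a <-> b */
        return 0;
    }
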
731 physpmap = ((addr64_t)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)pmap)) << 12) | (addr64_t)((unsigned int)pmap & 0xFFF); /* Get the physical address of the pmap */
733 pmap->pmapvr = (addr64_t)((uintptr_t)pmap) ^ physpmap; /* Make V to R translation mask */
735 pmapTrans[pmap->space].pmapPAddr = physpmap; /* Set translate table physical to point to us */
736 pmapTrans[pmap->space].pmapVAddr = CAST_DOWN(unsigned int, pmap); /* Set translate table virtual to point to us */
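
pmapvr holds the XOR of the pmap's virtual and physical addresses, so either address can be recovered from the other with one more XOR (v ^ m == p and p ^ m == v when m = v ^ p). A standalone sketch with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t vaddr  = 0x00000000DEAD7000ULL;  /* made-up virtual address of a pmap */
        uint64_t paddr  = 0x0000000123457000ULL;  /* made-up physical address of the same pmap */
        uint64_t pmapvr = vaddr ^ paddr;          /* the V to R translation mask */

        printf("phys %016llX\n", (unsigned long long)(vaddr ^ pmapvr)); /* recovers paddr */
        printf("virt %016llX\n", (unsigned long long)(paddr ^ pmapvr)); /* recovers vaddr */
        return 0;
    }
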
739 pmap->pmapVmmExt = NULL; /* Clear VMM extension block vaddr */
740 pmap->pmapVmmExtPhys = 0; /* and the paddr, too */
741 pmap->pmapFlags = pmapKeyDef; /* Set default key */
742 pmap->pmapCCtl = pmapCCtlVal; /* Initialize cache control */
743 pmap->ref_count = 1;
744 pmap->stats.resident_count = 0;
745 pmap->stats.wired_count = 0;
746 pmap->pmapSCSubTag = 0x0000000000000000ULL; /* Make sure this is clean and tidy */
750 return(pmap);
756 * Gives up a reference to the specified pmap. When the reference count
757 * reaches zero, the pmap structure is added to the pmap free list.
762 pmap_destroy(pmap_t pmap)
768 if (pmap == PMAP_NULL)
771 if ((ref_count = hw_atomic_sub(&pmap->ref_count, 1)) == UINT_MAX) /* underflow */
777 if (!(pmap->pmapFlags & pmapVMgsaa)) { /* Don't try this for a shadow assist guest */
778 pmap_unmap_sharedpage(pmap); /* Remove any mapping of page -1 */
782 if(pmap->stats.resident_count != 0)
783 panic("PMAP_DESTROY: pmap not empty");
785 if(pmap->stats.resident_count != 0) {
786 pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000ULL);
791 * Add the pmap to the pmap free list.
796 * Add the pmap to the pmap free list.
802 pmap->freepmap = free_pmap_list; /* Queue in front */
803 free_pmap_list = pmap;
808 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
809 fore = (pmap_t)pmap->pmap_link.prev;
810 aft = (pmap_t)pmap->pmap_link.next;
811 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
812 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
814 pmapTrans[pmap->space].pmapPAddr = -1; /* Invalidate the translate table physical */
815 pmapTrans[pmap->space].pmapVAddr = -1; /* Invalidate the translate table virtual */
816 zfree(pmap_zone, pmap);
822 * pmap_reference(pmap)
823 * gains a reference to the specified pmap.
826 pmap_reference(pmap_t pmap)
828 if (pmap != PMAP_NULL)
829 (void)hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
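
pmap_reference and pmap_destroy form the usual atomic reference protocol: only the caller whose decrement takes the count to zero recycles the structure, and a result of UINT_MAX from hw_atomic_sub (which returns the new value) means the count wrapped below zero, i.e. an over-release. A C11 sketch of the same protocol; note that fetch_sub returns the old value, hence the adjustment:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int ref_count = 1;     /* set to 1 at creation */

    static void toy_reference(void) {
        atomic_fetch_add(&ref_count, 1);           /* bump the count */
    }

    static void toy_destroy(void) {
        unsigned int newval = atomic_fetch_sub(&ref_count, 1) - 1;
        if (newval == (unsigned int)-1)            /* wrapped below zero */
            printf("panic: pmap over-released\n");
        else if (newval == 0)                      /* we held the last reference */
            printf("recycle the pmap\n");
    }

    int main(void) {
        toy_reference();                           /* 1 -> 2 */
        toy_destroy();                             /* 2 -> 1: nothing to do */
        toy_destroy();                             /* 1 -> 0: recycle */
        return 0;
    }
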
835 * Removes mappings of the associated page from the specified pmap
839 pmap_t pmap,
846 if (pmap == PMAP_NULL) { /* This should never be called with a null pmap */
847 panic("pmap_remove_some_phys: null pmap\n");
853 do { /* Keep going until we toss all pages from this pmap */
854 if (pmap->pmapFlags & pmapVMhost) {
867 panic("pmap_remove_some_phys: hw_purge_phys failed - pp = %p, pmap = %p, code = %p\n",
868 pp, pmap, mp); /* Handle failure with our usual lack of tact */
871 mp = hw_purge_space(pp, pmap); /* Toss a map */
881 panic("pmap_remove_some_phys: hw_purge_space failed - pp = %p, pmap = %p, code = %p\n",
882 pp, pmap, mp); /* Handle failure with our usual lack of tact */
888 if ((pmap->pmapFlags & pmapVMhost) && !pmap_verify_free(pa))
889 panic("pmap_remove_some_phys: cruft left behind - pa = %08X, pmap = %p\n", pa, pmap);
896 * pmap_remove(pmap, s, e)
898 * range determined by [s, e) and pmap.
907 pmap_t pmap,
913 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
929 va = mapping_remove(pmap, va); /* Remove the mapping and see what's next */
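
mapping_remove reports the next virtual address to look at, so the removal loop needs no knowledge of mapping sizes: it just chases the return value to the end of the range. (pmap_protect below iterates the same way through mapping_protect.) A sketch of the loop shape with a dummy remover that pretends every mapping is a single 4 KB page:

    #include <stdint.h>

    static uint64_t dummy_mapping_remove(uint64_t va) {
        /* The real call tears down whatever mapping covers va, page or
           block, and returns the first address past it. */
        return va + 4096;
    }

    static void remove_range(uint64_t va, uint64_t endva) {
        while (va < endva)
            va = dummy_mapping_remove(va);   /* remove one mapping, see what's next */
    }

    int main(void) { remove_range(0x10000, 0x14000); return 0; }
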
1050 * pmap_protect(pmap, s, e, prot)
1052 * virtual address range determined by [s, e) and pmap to prot.
1056 * Note that any requests to change the protection of a nested pmap are
1057 * ignored. Those changes MUST be done by calling this with the correct pmap.
1060 pmap_t pmap,
1068 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1071 pmap_remove(pmap, (addr64_t)sva, (addr64_t)eva); /* Yeah, dump 'em */
1079 mapping_protect(pmap, va, (prot & VM_PROT_ALL), &va); /* Change the protection and see what's next */
1091 * address (phys) in the pmap with the protection requested. If the
1101 pmap_enter(pmap_t pmap, vm_map_offset_t va, ppnum_t pa, vm_prot_t prot,
1107 if (pmap == PMAP_NULL) return; /* Leave if software pmap */
1120 colva = mapping_make(pmap, va, pa, mflags, 1, (prot & VM_PROT_ALL) ); /* Enter the mapping into the pmap */
1124 mapping_remove(pmap, colva); /* Remove the mapping that collided */
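
mapping_make reports the virtual address of any mapping it collides with; pmap_enter removes the collider and retries until the new mapping goes in. A sketch of that retry shape (the single stale mapping and all names are invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t stale = 0x20000;           /* pretend one stale mapping exists here */

    static uint64_t dummy_mapping_make(uint64_t va) {
        return (va == stale) ? va : 0;         /* nonzero: collided at that address */
    }

    static void dummy_mapping_remove(uint64_t colva) {
        if (colva == stale) stale = 0;         /* tear down the stale mapping */
    }

    int main(void) {
        uint64_t va = 0x20000, colva;
        while ((colva = dummy_mapping_make(va)) != 0)
            dummy_mapping_remove(colva);       /* remove the collision, then retry */
        printf("mapping entered at %llX\n", (unsigned long long)va);
        return 0;
    }
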
1148 void pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1154 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1155 panic("pmap_map_block: null pmap\n"); /* No, like that's dumb... */
1164 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1167 panic("pmap_map_block: mapping error %d, pmap = %p, va = %016llX\n", (uint32_t)(colva & mapRetCode), pmap, va);
1173 int pmap_map_block_rc(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
1179 if (pmap == PMAP_NULL) { /* Did they give us a pmap? */
1180 panic("pmap_map_block_rc: null pmap\n"); /* No, like that's dumb... */
1186 colva = mapping_make(pmap, va, pa, mflags, size, prot); /* Enter the mapping into the pmap */
1194 * pmap_extract(pmap, va)
1196 * virtual address specified by pmap and va if the
1202 vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va) {
1219 mp = mapping_find(pmap, (addr64_t)gva, &nextva,1); /* Find the mapping for this address */
1243 * ppnum_t pmap_find_phys(pmap, addr64_t va)
1245 * virtual address specified by pmap and va if the
1250 ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) {
1259 mp = mapping_find(pmap, va, &nextva, 1); /* Find the mapping for this address */
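
pmap_find_phys yields a physical page number rather than a byte address; line 731 above shows the reconstruction: shift the page number left 12 bits and OR back the page offset of the original address. A worked sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t va    = 0x00000000DEAD7123ULL;       /* made-up virtual address */
        uint64_t ppnum = 0x123457ULL;                 /* made-up page number from the lookup */
        uint64_t pa    = (ppnum << 12) | (va & 0xFFF);
        printf("pa = %016llX\n", (unsigned long long)pa);  /* 0000000123457123 */
        return 0;
    }
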
1294 __unused pmap_t pmap,
1384 pmap_collect(__unused pmap_t pmap)
1399 __unused pmap_t pmap,
1411 __unused pmap_t pmap,
1420 * pmap_pageable(pmap, s, e, pageable)
1421 * Make the specified pages (by pmap, offset)
1436 __unused pmap_t pmap,
1451 __unused pmap_t pmap,
1553 __unused pmap_t pmap,
1626 * The PPC pmap can only nest segments of 256MB, aligned on a 256MB boundary.
1634 * grand = the pmap that we will nest subord into
1635 * subord = the pmap that goes into the grand
1636 * vstart = start of range in pmap to be inserted
1637 * nstart = start of range in the nested pmap
1640 * Inserts a pmap into another. This is used to implement shared segments.
1648 * we are doing this. For example, VM should not be doing any pmap enters while it is nesting
1674 mp->mpFlags = 0x01000000 | mpNest | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
1676 mp->mpSpace = subord->space; /* Set the address space/pmap lookup ID */
1683 colladdr = hw_add_map(grand, mp); /* Go add the mapping to the pmap */
1687 panic("pmap_nest: attempt to nest into a non-empty range - pmap = %p, start = %016llX, end = %016llX\n",
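
Because only 256 MB segments on 256 MB boundaries can be nested, a nest request is well-formed only when both start addresses and the size are multiples of 0x10000000. An illustrative validity check (not the xnu validation code); the first call uses the shared-segment addresses that appear in pmap_map_sharedpage below:

    #include <stdint.h>
    #include <stdio.h>

    #define SEG_SIZE 0x10000000ULL   /* one 256 MB PPC segment */

    static int nest_args_ok(uint64_t vstart, uint64_t nstart, uint64_t size) {
        return size != 0 && ((vstart | nstart | size) & (SEG_SIZE - 1)) == 0;
    }

    int main(void) {
        printf("%d\n", nest_args_ok(0xFFFFFFFFF0000000ULL, 0xF0000000ULL, SEG_SIZE)); /* 1 */
        printf("%d\n", nest_args_ok(0x1000, 0, SEG_SIZE));                            /* 0 */
        return 0;
    }
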
1697 * grand = the pmap that we will nest subord into
1698 * vaddr = start of range in pmap to be unnested
1699 * size = size of range in pmap to be unnested
1701 * Removes a pmap from another. This is used to implement shared segments.
1742 (void)mapping_remove(grand, vaddr); /* Toss the nested pmap mapping */
1744 invalidateSegs(grand); /* Invalidate the pmap segment cache */
1748 * on all processors (if they are using the pmap we just changed) before returning.
1753 * because access to the subordinate pmap is being removed, but the pmap is still
1756 * Note that we only kick the other processor if we see that it was using the pmap while we
1764 if((unsigned int)grand == PerProcTable[i].ppe_vaddr->ppUserPmapVirt) { /* Is this guy using the changed pmap? */
1771 if(cpu_signal(i, SIGPcpureq, CPRQsegload, 0) == KERN_SUCCESS) { /* Make sure we see the pmap change */
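
So the unnest path signals only the processors it observed running with the changed pmap, and uninvolved CPUs take no interrupt. A sketch of that targeted-kick loop; the table, field, and signal call are simplified stand-ins for PerProcTable, ppUserPmapVirt, and cpu_signal:

    #include <stdio.h>

    #define NCPUS 4

    struct toy_per_proc { unsigned long user_pmap; };  /* stand-in per-processor entry */
    static struct toy_per_proc per_proc_table[NCPUS];

    static void toy_cpu_signal(int cpu) {              /* stand-in for cpu_signal(...) */
        printf("kick cpu %d to reload its segments\n", cpu);
    }

    static void kick_users_of(unsigned long pmap) {
        for (int i = 0; i < NCPUS; i++)
            if (per_proc_table[i].user_pmap == pmap)   /* was this CPU using the pmap? */
                toy_cpu_signal(i);                     /* yes: make it see the change */
    }

    int main(void) {
        per_proc_table[2].user_pmap = 0xC0DEUL;        /* pretend cpu 2 runs with the pmap */
        kick_users_of(0xC0DEUL);                       /* only cpu 2 is signaled */
        return 0;
    }
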
1792 * mappings defined for the purpose: the special pmap nest, and linkage mapping.
1794 * The special pmap nest (which is allocated in this function) is used as a place holder
1795 * in the kernel's pmap search list. It is 512MB long and covers the address range
1803 * user's pmap. This is pointed to by the per_proc and is switched in and out
1818 mp->mpFlags = 0x01000000 | mpLinkage | mpPerm | mpBSu | nlists; /* Make this a permanent nested pmap with a 32MB basic size unit */
1820 mp->mpSpace = kernel_pmap->space; /* Set the address space/pmap lookup ID */
1827 colladdr = hw_add_map(kernel_pmap, mp); /* Go add the mapping to the pmap */
1879 * Note that we do not go to the trouble of making a pmap segment cache
1898 if((thread->machine.umwSpace == map->pmap->space) && (thread->machine.umwRelo == reladd)) { /* Already mapped? */
1908 thread->machine.umwSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1909 mp->mpSpace = map->pmap->space; /* Set the address space/pmap lookup ID */
1912 * Here we make an assumption that we are going to be using the base pmap's address space.
1916 hw_map_seg(map->pmap, lowGlo.lgUMWvaddr, baddrs); /* Make the entry for the first segment */
1983 sharedPmap = pmap_create(0, FALSE); /* Get a pmap to hold the common segment */
2008 * void pmap_map_sharedpage(pmap_t pmap);
2015 void pmap_map_sharedpage(task_t task, pmap_t pmap){
2020 ret = pmap_nest(pmap, sharedPmap, 0xFFFFFFFFF0000000ULL, 0x00000000F0000000ULL,
2032 * void pmap_unmap_sharedpage(pmap_t pmap);
2038 void pmap_unmap_sharedpage(pmap_t pmap){
2049 mp = hw_find_map(pmap, 0xFFFFFFFFF0000000ULL, &nextva); /* Find the mapping for this address */
2051 panic("pmap_unmap_sharedpage: mapping lock failure - rc = %p, pmap = %p\n", mp, pmap); /* Die... */
2064 ret = pmap_unnest(pmap, 0xFFFFFFFFF0000000ULL, 0x0000000010000000ULL); /* Unnest the max 64-bit page */
2087 * the specified pmap
2089 void pmap_disable_NX(pmap_t pmap) {
2091 pmap->pmapFlags |= pmapNXdisabled;