• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

Lines Matching defs:pages

128 	vm_page_t	pages;
207 * Resident pages that represent real memory
226 * real pages, for example to leave a page with
237 * Fictitious pages don't have a physical address,
245 * Guard pages are not accessible so they don't
262 * affinity to zf pages
277 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
278 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
305 * that pages are "uninteresting" and should be placed
493 * than the number of physical pages in the system.
537 bucket->pages = VM_PAGE_NULL;
561 * Up until now, the pages which have been set aside are not under
564 * all VM managed pages are "free", courtesy of pmap_startup.
569 printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
621 * Allocate and map physical pages to back new virtual pages.
684 * Check if we want to initialize pages to a known value
692 * a pool of pages whose addresses are less than 4G... this pool
704 panic("couldn't reserve the lopage pool: not enough lo pages\n");
737 * Release pages in reverse order so that physical pages
740 * they require several consecutive pages.
762 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
811 0, PAGE_SIZE, "vm pages");
822 * Adjust zone statistics to account for the real pages allocated
837 * vm_page_create turns this memory into available pages.
920 /* only insert "pageout" pages into "pageout" objects,
921 * and normal pages into normal objects */
939 mem->next = bucket->pages;
940 bucket->pages = mem;
948 * Now link into the object's list of backed pages.
1021 if (bucket->pages) {
1022 vm_page_t *mp = &bucket->pages;
1038 mem->next = bucket->pages;
1045 bucket->pages = mem;
1074 * Now link into the object's list of backed pages.
1131 if ((this = bucket->pages) == mem) {
1134 bucket->pages = mem->next;
1150 * Now remove from the object's list of backed pages.
1243 * new pages can be inserted into this object... this in turn
1249 if (bucket->pages == VM_PAGE_NULL) {
1256 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
1300 * One exception is VM object collapsing, where we transfer pages
1347 * Returns VM_PAGE_NULL if there are no free pages.
1411 * Add more fictitious pages to the free list.
1420 * private pages for pageout, and as blocking pages for
1423 * 3. To smooth allocation humps, we allocate single pages
1430 * 4. By having the pages in the zone pre-initialized, we need
1447 * pages are immediately available, and do not zero the space. We need
1461 * number of fictitious pages that any single caller will
1465 * and then call this routine. This routine finds the pages
1468 * of fictitious pages required in this manner is 2. 5 is
1581 * and see if current number of free pages would allow us
1583 * if there are pages available, disable preemption and
1587 * empty, figure out how many pages we can steal from the
1589 * return 1 of these pages when done... only wakeup the
1590 * pageout_scan thread if we moved pages from the global
1863 * the pageout daemon will keep making free pages
1903 * If there are plenty of free pages, then we don't sleep.
1917 * need to allocate two pages. The first allocation
2131 * Free a list of pages. The list can be up to several hundred pages,
2134 * per page. We sort the incoming pages into n lists, one for
2353 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
2456 * inactive queue. Note wired pages should not have
2663 * move pages from the specified aging bin to
2878 * Check that the list of pages is ordered by
2883 vm_page_t pages,
2890 prev_addr = pages->phys_page;
2892 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2896 printf("pages %p page_count %d\n", pages, page_count);
2903 printf("pages %p actual count 0x%x but requested 0x%x\n",
2904 pages, page_count, npages);
2958 * + consider pages that are currently 'pmapped'
2961 * + consider dirty pages
2965 * Find a region large enough to contain at least n pages
2969 * we assume that the vm_page_t array has the available physical pages in an
2978 * sweep at the beginning of the array looking for pages that meet our criteria
2983 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
2985 * to other threads trying to acquire free pages (or move pages from q to q),
2988 * which steals the pages from the queues they're currently on... pages on the free
2989 * queue can be stolen directly... pages that are on any of the other queues
2993 * dump the pages we've currently stolen back to the free list, and pick up our
3000 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
3062 /* no more low pages... */
3068 * don't want to take pages from our
3099 * pages on the free list are always 'busy'
3101 * for the transient states... pages that are
3142 * so can't consider any free pages... if
3190 * first pass through to pull the free pages
3192 * need substitute pages, we won't grab any
3193 * of the free pages in the run... we'll clear
3196 * free pages in this run and return them to the free list
3224 * when substituting for pmapped/dirty pages
3241 * pages have already been removed from
3404 * gobbled pages are also counted as wired pages
3426 printf("vm_find_page_contiguous(num=%d,low=%d): found %d pages in %d.%06ds... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages\n",
3437 * Allocate a list of contiguous, wired pages.
3446 vm_page_t pages;
3459 pages = vm_page_find_contiguous(npages, max_pnum, wire);
3461 if (pages == VM_PAGE_NULL)
3490 * The CPM pages should now be available and
3493 assert(vm_page_verify_contiguous(pages, npages));
3495 *list = pages;
3533 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)