Lines Matching refs:pages in /macosx-10.9.5/xnu-2422.115.4/osfmk/vm/

149 	vm_page_t	pages;
241 * Resident pages that represent real memory
259 * real pages, for example to leave a page with
277 /* N.B. Guard and fictitious pages must not
281 * Fictitious pages don't have a physical address,
289 * Guard pages are not accessible so they don't
306 * importance to anonymous pages (less likely to pick)
310 queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */
324 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
325 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
326 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
367 * that pages are "uninteresting" and should be placed
632 * than the number of physical pages in the system.
703 bucket->pages = VM_PAGE_NULL;
734 * Up until now, the pages which have been set aside are not under
737 * all VM managed pages are "free", courtesy of pmap_startup.
744 printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
796 * Allocate and map physical pages to back new virtual pages.
863 * Check if we want to initialize pages to a known value
891 * Release pages in reverse order so that physical pages
894 * they require several consecutive pages.
916 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
965 0, PAGE_SIZE, "vm pages");
977 * Adjust zone statistics to account for the real pages allocated
991 * vm_page_create turns this memory into available pages.
1093 /* only insert "pageout" pages into "pageout" objects,
1094 * and normal pages into normal objects */
1115 mem->next = bucket->pages;
1116 bucket->pages = mem;
1135 * Now link into the object's list of backed pages.
1249 if (bucket->pages) {
1250 vm_page_t *mp = &bucket->pages;
1267 mem->next = bucket->pages;
1274 bucket->pages = mem;
1335 if ((this = bucket->pages) == mem) {
1338 bucket->pages = mem->next;
1355 * Now remove from the object's list of backed pages.
1492 * new pages can be inserted into this object... this in turn
1498 if (bucket->pages == VM_PAGE_NULL) {
1507 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
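The hash-bucket hits above (head insert at 1115-1116, unlink at 1249-1250 and 1335-1338, lookup at 1498-1507) all manipulate a singly linked chain headed by bucket->pages. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real vm_page_t/vm_page_bucket_t definitions:

#include <stddef.h>

/* Simplified stand-ins; the real structures live in the xnu vm headers. */
typedef struct vpage {
	struct vpage *next;        /* hash chain link, as in the hits above */
	void         *object;      /* owning object */
	unsigned long offset;      /* offset within that object */
} vpage_t;

typedef struct {
	vpage_t *pages;            /* chain head; NULL plays the role of VM_PAGE_NULL */
} bucket_t;

/* Insert: push at the head of the chain (cf. lines 1115-1116). */
static void bucket_insert(bucket_t *bucket, vpage_t *mem)
{
	mem->next = bucket->pages;
	bucket->pages = mem;
}

/* Lookup: walk the chain comparing object/offset (cf. line 1507). */
static vpage_t *bucket_lookup(bucket_t *bucket, void *object, unsigned long offset)
{
	for (vpage_t *mem = bucket->pages; mem != NULL; mem = mem->next)
		if (mem->object == object && mem->offset == offset)
			return mem;
	return NULL;
}

/* Remove: unlink through an indirect pointer (cf. lines 1249-1250, 1335-1338). */
static void bucket_remove(bucket_t *bucket, vpage_t *mem)
{
	vpage_t **mp = &bucket->pages;

	while (*mp != NULL && *mp != mem)
		mp = &(*mp)->next;
	if (*mp == mem)
		*mp = mem->next;
}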
1559 * One exception is VM object collapsing, where we transfer pages
1642 * we're leaving this turned off for now... currently pages
1666 * Returns VM_PAGE_NULL if there are no free pages.
1726 * Add more fictitious pages to the zone.
1735 * private pages for pageout, and as blocking pages for
1738 * 3. To smooth allocation humps, we allocate single pages
1752 * pages are immediately available, and do not zero the space. We need
1766 * number of fictitious pages that any single caller will
1770 * and then call this routine. This routine finds the pages
1773 * of fictitious pages required in this manner is 2. 5 is
1902 * and see if current number of free pages would allow us
1904 * if there are pages available, disable preemption and
1908 * empty, figure out how many pages we can steal from the
1910 * return 1 of these pages when done... only wakeup the
1911 * pageout_scan thread if we moved pages from the global
2185 * the pageout daemon will keep making free pages
2213 * If there are plenty of free pages, then we don't sleep.
2227 * need to allocate two pages. The first allocation
2478 * Free a list of pages. The list can be up to several hundred pages,
2501 * that we can 'pipeline' the pages onto the
2613 * have enough pages to satisfy them all.
2689 * Wired pages are not counted as "re-usable"
2735 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
2844 * inactive queue. Note wired pages should not have
3174 * move pages from the specified aging bin to
3260 * Switch "throttled" pages to "active".
3284 * We insert it at the head of the active queue, so that these pages
3301 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
3319 * move pages from the indicated local queue to the global active queue
3350 * Switch "local" pages to "active".
3643 * Check that the list of pages is ordered by
3648 vm_page_t pages,
3655 prev_addr = pages->phys_page;
3657 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
3661 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
3668 printf("pages %p actual count 0x%x but requested 0x%x\n",
3669 pages, page_count, npages);
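The vm_page_verify_contiguous hits at 3643-3669 sanity-check that a handed-back run is ordered by consecutive physical page numbers and has exactly the requested length. A hedged standalone sketch of that check, with a hypothetical page type in place of vm_page_t/NEXT_PAGE():

#include <stdint.h>
#include <stdio.h>

typedef struct page {
	uintptr_t    phys_page;    /* physical page number */
	struct page *next;         /* next page in the run (like NEXT_PAGE()) */
} page_t;

/* Returns 1 if the run is physically contiguous and exactly npages long. */
static int verify_contiguous(page_t *pages, unsigned int npages)
{
	uintptr_t    prev_addr = pages->phys_page;
	unsigned int page_count = 1;

	for (page_t *m = pages->next; m != NULL; m = m->next) {
		if (m->phys_page != prev_addr + 1) {
			printf("pages %p: contiguity break after %u pages\n",
			       (void *)pages, page_count);
			return 0;
		}
		prev_addr = m->phys_page;
		page_count++;
	}
	if (page_count != npages) {
		printf("pages %p actual count 0x%x but requested 0x%x\n",
		       (void *)pages, page_count, npages);
		return 0;
	}
	return 1;
}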
3803 * Find a region large enough to contain at least n pages
3807 * we assume that the vm_page_t array has the available physical pages in an
3816 * sweep at the beginning of the array looking for pages that meet our criteria
3821 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
3823 * to other threads trying to acquire free pages (or move pages from q to q),
3826 * which steals the pages from the queues they're currently on... pages on the free
3827 * queue can be stolen directly... pages that are on any of the other queues
3831 * dump the pages we've currently stolen back to the free list, and pick up our
3838 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
3856 * Can we steal in-use (i.e. not free) pages when searching for
3857 * physically-contiguous pages ?
3945 /* no more low pages... */
3979 * pages on the free list are always 'busy'
3981 * for the transient states... pages that are
4012 * If we can't steal used pages,
4044 * so can't consider any free pages... if
4115 * first pass through to pull the free pages
4117 * need substitute pages, we won't grab any
4118 * of the free pages in the run... we'll clear
4121 * free pages in this run and return them to the free list
4173 * when substituting for pmapped/dirty pages
4460 * gobbled pages are also counted as wired pages
4485 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
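The block comment at 3803-3838 above describes vm_page_find_contiguous: sweep the vm_page_t array for a run of pages satisfying the caller's physical-address constraints, periodically yielding the locks, and optionally stealing in-use pages with substitution. A much-reduced sketch of just the sweep, with no locking, stealing, or substitution, and invented types and names:

#include <stdint.h>
#include <stddef.h>

typedef struct {
	uint32_t phys_page;        /* physical page number */
	int      free;             /* currently on the free queue? */
} page_desc_t;

/* Scan an ordered array of page descriptors for npages consecutive,
 * free pages whose physical numbers stay at or below max_pnum.
 * Returns the starting index of the run, or -1 if none is found. */
static long find_contiguous_run(const page_desc_t *pages, size_t total,
                                size_t npages, uint32_t max_pnum)
{
	size_t run_start = 0, run_len = 0;

	if (npages == 0)
		return -1;

	for (size_t i = 0; i < total; i++) {
		int usable  = pages[i].free && pages[i].phys_page <= max_pnum;
		int extends = usable && run_len > 0 &&
		              pages[i].phys_page == pages[i - 1].phys_page + 1;

		if (extends) {
			run_len++;
		} else if (usable) {
			run_start = i;     /* start a fresh run at this page */
			run_len = 1;
		} else {
			run_len = 0;       /* unusable page breaks the run */
		}
		if (run_len == npages)
			return (long)run_start;
	}
	return -1;
}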
4499 * Allocate a list of contiguous, wired pages.
4510 vm_page_t pages;
4527 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
4529 if (pages == VM_PAGE_NULL)
4542 * The CPM pages should now be available and
4545 assert(vm_page_verify_contiguous(pages, npages));
4547 *list = pages;
4555 * when working on a 'run' of pages, it is necessary to hold
4559 * collections of pages that don't require any work brokered by the
4561 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
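The comment at 4555-4561 describes the batching scheme behind DELAYED_WORK_LIMIT: while walking a run of pages under the object lock, queue-related work is accumulated locally and then applied in a second pass under the page-queues lock, so that lock is taken once per batch rather than once per page. A loose, hypothetical illustration of that idea only; the names and structures below are invented, not the kernel's:

#include <pthread.h>
#include <stddef.h>

#define DELAYED_WORK_LIMIT 64              /* assumed batch size */

typedef struct { size_t page_index; int op; } dw_entry_t;

static pthread_mutex_t page_queues_lock = PTHREAD_MUTEX_INITIALIZER;

/* Second pass: take the queues lock once and apply the whole batch. */
static void dw_flush(const dw_entry_t *batch, size_t count)
{
	pthread_mutex_lock(&page_queues_lock);
	for (size_t i = 0; i < count; i++) {
		/* apply batch[i].op to the appropriate page queue here */
		(void)batch[i];
	}
	pthread_mutex_unlock(&page_queues_lock);
}

/* First pass: examine pages and record queue work without the lock. */
static void process_run(const size_t *pages, size_t npages)
{
	dw_entry_t batch[DELAYED_WORK_LIMIT];
	size_t     pending = 0;

	for (size_t i = 0; i < npages; i++) {
		batch[pending].page_index = pages[i];
		batch[pending].op         = 0;   /* e.g. "move to free queue" */
		if (++pending == DELAYED_WORK_LIMIT) {
			dw_flush(batch, pending);
			pending = 0;
		}
	}
	if (pending)
		dw_flush(batch, pending);
}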
4588 * if this object contains the majority of the pages resident
4590 * worked on contain the majority of the pages), we could
4592 * to find pages to move to the free queue, since it has to
4631 * Add this page to our list of reclaimed pages,
4824 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
5049 * we've already factored out pages in the laundry which
5194 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
5278 HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
5298 HIBLOG("Freed %d pages\n", count);
5477 Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
5478 pages known to VM to not need saving are subtracted.
5479 Wired pages to be saved are present in page_list_wired, pageable in page_list.
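Lines 5477-5479 state the bitmap convention used by the hibernation code: a zero bit means the page still needs to be written to the image, every page starts out as "save", and pages the VM knows can be skipped are subtracted by setting their bits. A simplified, flat sketch of that convention (the kernel actually keeps these bitmaps per memory bank; the names here are hypothetical):

#include <stdint.h>
#include <stdlib.h>

typedef struct {
	uint32_t page_count;
	uint32_t bitmap[];             /* 1 bit per page, 0 = needs saving */
} page_list_t;

static page_list_t *page_list_alloc(uint32_t page_count)
{
	size_t words = (page_count + 31) / 32;
	page_list_t *pl = calloc(1, sizeof(*pl) + words * sizeof(uint32_t));

	if (pl != NULL)
		pl->page_count = page_count;   /* calloc zeroes: every page defaults to "save" */
	return pl;
}

/* Subtract a page from the image: mark it as not needing to be saved. */
static void page_list_discard(page_list_t *pl, uint32_t page)
{
	pl->bitmap[page / 32] |= 1u << (page & 31);
}

static int page_needs_saving(const page_list_t *pl, uint32_t page)
{
	return (pl->bitmap[page / 32] & (1u << (page & 31))) == 0;
}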
5493 uint32_t pages = page_list->page_count;
5496 uint32_t count_wire = pages;
5546 hibernate_stats.cd_pages = pages;
5563 pages--;
5578 pages--;
5596 pages--;
5612 pages--;
5772 pages -= count_discard_vm_struct_pages;
5794 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
5806 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
5807 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
5812 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
5814 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
6083 mem->next = bucket->pages;
6084 bucket->pages = mem;
6226 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
6350 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
6404 if (bucket->pages == VM_PAGE_NULL) {
6410 p = bucket->pages;