Lines Matching refs:pages (only in /macosx-10.10.1/xnu-2782.1.97/osfmk/vm/)

246  *	Resident pages that represent real memory
263 * real pages, for example to leave a page with
281 /* N.B. Guard and fictitious pages must not
285 * Fictitious pages don't have a physical address,
293 * Guard pages are not accessible so they don't
310 * importance to anonymous pages (less likely to pick)
314 queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */
329 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
330 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
331 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
373 * that pages are "uninteresting" and should be placed
647 * than the number of physical pages in the system.
752 * Up until now, the pages which have been set aside are not under
755 * all VM managed pages are "free", courtesy of pmap_startup.
761 printf("vm_page_bootstrap: %d free pages and %d wired pages\n",
814 * Allocate and map physical pages to back new virtual pages.
908 * Check if we want to initialize pages to a known value
936 * Release pages in reverse order so that physical pages
939 * they require several consecutive pages.
963 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
1012 0, PAGE_SIZE, "vm pages");
1024 * Adjust zone statistics to account for the real pages allocated
1038 * vm_page_create turns this memory into available pages.
1139 /* only insert "pageout" pages into "pageout" objects,
1140 * and normal pages into normal objects */
1183 * Now link into the object's list of backed pages.
1450 * Now remove from the object's list of backed pages.
1620 * new pages can be inserted into this object... this in turn
1687 * One exception is VM object collapsing, where we transfer pages
1770 * we're leaving this turned off for now... currently pages
1794 * Returns VM_PAGE_NULL if there are no free pages.
1854 * Add more fictitious pages to the zone.
1863 * private pages for pageout, and as blocking pages for
1866 * 3. To smooth allocation humps, we allocate single pages
1880 * pages are immediately available, and do not zero the space. We need
1894 * number of fictitious pages that any single caller will
1898 * and then call this routine. This routine finds the pages
1901 * of fictitious pages required in this manner is 2. 5 is
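
The hits from the fictitious-page comments (source lines 1854-1901) describe the allocation policy: fictitious pages come from a zone, more are added one page at a time to smooth allocation humps, the space is not zeroed, and callers that need a small, bounded number of them retry until the zone can satisfy the request. A rough stand-alone sketch of that grab-or-replenish loop follows; the "zone" here is just a linked list and every name is hypothetical, not xnu's zalloc interface.

    /*
     * Sketch only: grab a fictitious page from a small cache without blocking;
     * if the cache is empty, add a single page and retry. The "zone" here is
     * just a linked list; none of this is the kernel's zalloc API.
     */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct fict_page { struct fict_page *next; } fict_page_t;

    static fict_page_t *fict_zone;           /* stand-in for the fictitious-page zone */

    static fict_page_t *zone_get_noblock(void) {
        fict_page_t *m = fict_zone;
        if (m)
            fict_zone = m->next;
        return m;                            /* NULL means the zone is empty */
    }

    /* Add one page to the zone; allocating singly smooths allocation humps. */
    static void page_more_fictitious(void) {
        fict_page_t *m = malloc(sizeof *m);  /* the space is deliberately not zeroed */
        if (m == NULL)
            abort();
        m->next = fict_zone;
        fict_zone = m;
    }

    static fict_page_t *page_grab_fictitious(void) {
        fict_page_t *m;
        while ((m = zone_get_noblock()) == NULL)
            page_more_fictitious();          /* retry until a page is available */
        return m;
    }

    int main(void) {
        fict_page_t *m = page_grab_fictitious();
        printf("got fictitious page %p\n", (void *)m);
        free(m);
        return 0;
    }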
2030 * and see if current number of free pages would allow us
2032 * if there are pages available, disable preemption and
2036 * empty, figure out how many pages we can steal from the
2038 * return 1 of these pages when done... only wakeup the
2039 * pageout_scan thread if we moved pages from the global
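
The hits around source lines 2030-2039 sketch the fast path of the page grabber: check the global free count, disable preemption, steal a batch of pages from the global free queue onto a local free list when that list is empty, hand back one page, and wake pageout_scan only if the global queue was touched. Below is a minimal user-space approximation of that batching idea; the names (page_t, page_grab, BATCH, pageout_wakeup) are hypothetical, and a mutex stands in for the kernel's preemption and lock discipline.

    /*
     * Sketch only: a global free list refilled into a local cache in batches.
     * Names and locking are illustrative, not xnu's per-CPU machinery.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    #define BATCH 16                    /* pages stolen from the global queue per refill */

    typedef struct page { struct page *next; } page_t;

    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
    static page_t *global_free;         /* global free queue */
    static int global_free_count;
    static page_t *local_free;          /* stand-in for a per-CPU free list */

    static void pageout_wakeup(void) { puts("wake pageout_scan"); }

    /* Grab one page: use the local list if possible, otherwise steal a batch. */
    static page_t *page_grab(void) {
        if (local_free) {               /* fast path: global lock not taken */
            page_t *m = local_free;
            local_free = m->next;
            return m;
        }
        pthread_mutex_lock(&free_lock);
        int n = global_free_count < BATCH ? global_free_count : BATCH;
        page_t *head = NULL;
        for (int i = 0; i < n; i++) {   /* steal up to BATCH pages */
            page_t *m = global_free;
            global_free = m->next;
            global_free_count--;
            m->next = head;
            head = m;
        }
        int drained = (global_free_count == 0);
        pthread_mutex_unlock(&free_lock);
        if (drained)
            pageout_wakeup();           /* only when the global queue was emptied */
        if (!head)
            return NULL;
        local_free = head->next;        /* keep the rest locally, return one page */
        return head;
    }

    int main(void) {
        for (int i = 0; i < 64; i++) {  /* seed a small global free queue */
            page_t *m = malloc(sizeof *m);
            m->next = global_free;
            global_free = m;
            global_free_count++;
        }
        for (int i = 0; i < 70; i++)
            if (!page_grab()) { puts("out of pages"); break; }
        return 0;
    }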
2309 * the pageout daemon will keep making free pages
2335 * when we are single-threaded and pages are being released
2363 * If there are plenty of free pages, then we don't sleep.
2377 * need to allocate two pages. The first allocation
2653 * Free a list of pages. The list can be up to several hundred pages,
2676 * that we can 'pipeline' the pages onto the
2788 * have enough pages to satisfy them all.
2883 * Wired pages are not counted as "re-usable"
2929 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
3056 * inactive queue. Note wired pages should not have
3349 * move pages from the specified aging bin to
3435 * Switch "throttled" pages to "active".
3459 * We insert it at the head of the active queue, so that these pages
3476 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
3494 * move pages from the indicated local queue to the global active queue
3525 * Switch "local" pages to "active".
3818 * Check that the list of pages is ordered by
3823 vm_page_t pages,
3830 prev_addr = pages->phys_page;
3832 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
3836 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
3843 printf("pages %p actual count 0x%x but requested 0x%x\n",
3844 pages, page_count, npages);
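
The hits at source lines 3818-3844 come from a routine that walks a NEXT_PAGE()-linked list and checks two things: that the physical page numbers are strictly consecutive, and that the number of pages on the list matches the count the caller requested. Here is a self-contained approximation of that check, with a simplified page structure (a plain next pointer and an integer phys_page) standing in for vm_page_t.

    /*
     * Sketch only: verify that a linked list of pages is physically contiguous
     * and holds exactly npages entries. Field names only approximate vm_page_t.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct page {
        struct page *next;       /* stand-in for NEXT_PAGE() linkage */
        uint32_t     phys_page;  /* physical page number */
    } page_t;

    static bool page_verify_contiguous(page_t *pages, unsigned int npages) {
        uint32_t prev_addr = pages->phys_page;
        unsigned int page_count = 1;

        for (page_t *m = pages->next; m != NULL; m = m->next) {
            if (m->phys_page != prev_addr + 1) {
                printf("pages %p page_count %u npages %u\n",
                       (void *)pages, page_count, npages);
                return false;    /* hole in the run */
            }
            prev_addr = m->phys_page;
            page_count++;
        }
        if (page_count != npages) {
            printf("pages %p actual count 0x%x but requested 0x%x\n",
                   (void *)pages, page_count, npages);
            return false;
        }
        return true;
    }

    int main(void) {
        page_t p2 = { NULL, 101 }, p1 = { &p2, 100 };
        printf("contiguous: %d\n", page_verify_contiguous(&p1, 2));
        return 0;
    }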
4000 * Find a region large enough to contain at least n pages
4004 * we assume that the vm_page_t array has the available physical pages in an
4013 * sweep at the beginning of the array looking for pages that meet our criteria
4018 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
4020 * to other threads trying to acquire free pages (or move pages from q to q),
4023 * which steals the pages from the queues they're currently on... pages on the free
4024 * queue can be stolen directly... pages that are on any of the other queues
4028 * dump the pages we've currently stolen back to the free list, and pick up our
4035 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
4053 * Can we steal in-use (i.e. not free) pages when searching for
4054 * physically-contiguous pages ?
4142 /* no more low pages... */
4176 * pages on the free list are always 'busy'
4178 * for the transient states... pages that are
4209 * If we can't steal used pages,
4241 * so can't consider any free pages... if
4312 * first pass through to pull the free pages
4314 * need substitute pages, we won't grab any
4315 * of the free pages in the run... we'll clear
4318 * free pages in this run and return them to the free list
4364 * when substituting for pmapped/dirty pages
4650 * gobbled pages are also counted as wired pages
4675 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
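
The comments around source lines 4000-4035 outline how the contiguous-run search works: sweep the vm_page_t array for pages that meet the criteria, drop the two locks after examining MAX_CONSIDERED_BEFORE_YIELD pages so other threads can keep allocating, then make a second pass that steals the pages of the winning run (dumping any partial run back to the free list if it breaks). The sketch below shows only the first-pass array sweep for a run of free entries; locking, page stealing, and the pnum/mask constraints are left out, and all names are illustrative.

    /*
     * Sketch only: first-pass sweep for npages consecutive "free" entries in a
     * page array. The kernel version also honors pnum/mask constraints, steals
     * in-use pages on a second pass, and dumps a broken run back to the free list.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define NPAGES 32
    #define MAX_CONSIDERED_BEFORE_YIELD 128    /* illustrative yield threshold */

    typedef struct { bool free; } page_t;

    static void maybe_yield(unsigned int *considered) {
        if (++*considered >= MAX_CONSIDERED_BEFORE_YIELD) {
            *considered = 0;
            /* the kernel drops the 2 locks it holds here so that other threads
             * trying to acquire free pages (or move pages from q to q) can run */
        }
    }

    /* Return the index of the first page of a free run of length npages, or -1. */
    static int find_contiguous(page_t *pages, int total, int npages) {
        int run_start = -1, run_len = 0;
        unsigned int considered = 0;

        for (int i = 0; i < total; i++) {
            maybe_yield(&considered);
            if (pages[i].free) {
                if (run_len == 0)
                    run_start = i;
                if (++run_len == npages)
                    return run_start;          /* found a big enough run */
            } else {
                run_len = 0;                   /* run broken: start over */
            }
        }
        return -1;
    }

    int main(void) {
        page_t pages[NPAGES];
        for (int i = 0; i < NPAGES; i++)
            pages[i].free = (i % 7) != 0;      /* every 7th page is busy */
        printf("run of 5 starts at index %d\n", find_contiguous(pages, NPAGES, 5));
        return 0;
    }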
4689 * Allocate a list of contiguous, wired pages.
4700 vm_page_t pages;
4717 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
4719 if (pages == VM_PAGE_NULL)
4732 * The CPM pages should now be available and
4735 assert(vm_page_verify_contiguous(pages, npages));
4737 *list = pages;
4745 * when working on a 'run' of pages, it is necessary to hold
4749 * collections of pages that don't require any work brokered by the
4751 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
4778 * if this object contains the majority of the pages resident
4780 * worked on contain the majority of the pages), we could
4782 * to find pages to move to the free queue, since it has to
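
Source lines 4745-4751 describe the batching trick used while working on a run of pages: instead of taking the heavier lock for every page, pages needing queue work are collected up to DELAYED_WORK_LIMIT and then handled in one pass under the lock. A generic sketch of that deferred-work pattern follows, with hypothetical names and a pthread mutex in place of the VM locks.

    /*
     * Sketch only: defer per-page queue work into a batch so the heavier lock
     * is taken once per DELAYED_WORK_LIMIT pages rather than once per page.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define DELAYED_WORK_LIMIT 32              /* batch size; value is illustrative */

    typedef struct { int pageno; } page_t;

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static page_t *batch[DELAYED_WORK_LIMIT];
    static int batch_count;

    static void do_delayed_work(void) {
        if (batch_count == 0)
            return;
        pthread_mutex_lock(&queue_lock);       /* one lock round-trip per batch */
        /* the per-page queue manipulation would happen here */
        printf("requeued %d pages under one lock hold\n", batch_count);
        pthread_mutex_unlock(&queue_lock);
        batch_count = 0;
    }

    static void note_page_needs_work(page_t *m) {
        batch[batch_count++] = m;
        if (batch_count == DELAYED_WORK_LIMIT)
            do_delayed_work();                 /* flush when the batch fills up */
    }

    int main(void) {
        page_t pages[100];
        for (int i = 0; i < 100; i++) {
            pages[i].pageno = i;
            note_page_needs_work(&pages[i]);
        }
        do_delayed_work();                     /* flush the final partial batch */
        return 0;
    }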
4824 * Add this page to our list of reclaimed pages,
5017 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
5242 * we've already factored out pages in the laundry which
5389 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
5473 HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
5493 HIBLOG("Freed %d pages\n", count);
5628 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
5685 Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
5686 pages known to VM to not need saving are subtracted.
5687 Wired pages to be saved are present in page_list_wired, pageable in page_list.
5701 uint32_t pages = page_list->page_count;
5704 uint32_t count_wire = pages;
5754 hibernate_stats.cd_pages = pages;
5771 pages--;
5786 pages--;
5804 pages--;
5820 pages--;
5980 pages -= count_discard_vm_struct_pages;
6002 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
6014 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
6015 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
6020 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
6022 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
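
Source lines 5685-5687 state the bitmap convention used when the hibernation page lists are built: a zero bit means the page must be saved, every page starts out marked for saving, and pages the VM knows it can discard are subtracted, which is what the repeated pages-- hits above are doing before *pagesOut is computed. A minimal sketch of that subtract-from-everything accounting over a flat bitmap follows; the real page lists are banked, and the names here are illustrative.

    /*
     * Sketch only: bit == 0 means "save this page". All pages default to saved;
     * pages known not to need saving are subtracted from the running count.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOTAL_PAGES 1024

    static uint8_t discard_bitmap[TOTAL_PAGES / 8];  /* zero-filled: save everything */
    static uint32_t pages = TOTAL_PAGES;             /* pages still to be written */

    static void page_discard(uint32_t pn) {
        uint8_t bit = (uint8_t)(1u << (pn & 7));
        if (!(discard_bitmap[pn >> 3] & bit)) {
            discard_bitmap[pn >> 3] |= bit;          /* mark: no need to save */
            pages--;                                 /* one fewer page in the image */
        }
    }

    int main(void) {
        memset(discard_bitmap, 0, sizeof discard_bitmap);
        for (uint32_t pn = 100; pn < 200; pn++)      /* e.g. a discardable object's pages */
            page_discard(pn);
        printf("pages to save: %u of %u\n", pages, (unsigned)TOTAL_PAGES);
        return 0;
    }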