Lines Matching refs:page

63  *	The proverbial page-out daemon.
127 #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
517 * Handle the "target" page(s). These pages are to be freed if
522 * adjacent page and conversion to a target.
534 * Revoke all access to the page. Since the object is
535 * locked, and the page is busy, this prevents the page
539 * Since the page is left "dirty" but "not modified", we
540 * can detect whether the page was redirtied during
565 * page, so make it active.
580 * The page was busy so no extraneous activity
594 * Occurs when the original page was wired
603 * Set the dirty state according to whether or not the page was
605 * NOT call pmap_clear_modify since the page is still mapped.
606 * If the page were to be dirtied between the 2 calls, this
628 * Wakeup any thread waiting for the page to be un-cleaning.
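The fragments above (source lines 534-628) describe a subtle ordering rule: the page stays mapped during the pageout, so the code reads the hardware modify bit without clearing it, because a separate clear could swallow a store that redirtied the page in between. A minimal user-space model of that check, with invented field names standing in for the real vm_page bits:

#include <stdbool.h>
#include <stdio.h>

struct page_model {
    bool dirty;     /* software dirty bit kept by the VM layer */
    bool pmap_mod;  /* stand-in for the hardware modified bit  */
    bool busy;      /* set while the clean is in flight        */
};

/* pmap_is_modified() analogue: read the bit, do NOT clear it */
static bool pmap_is_modified_model(const struct page_model *m)
{
    return m->pmap_mod;
}

static void pageout_complete_model(struct page_model *m)
{
    /*
     * Fold the hardware state back into the software dirty bit.
     * Clearing pmap_mod here could lose a store that landed
     * between the read and the clear, which is why the real code
     * does not call pmap_clear_modify at this point.
     */
    m->dirty = pmap_is_modified_model(m);
    m->busy  = false;   /* wake any waiter (PAGE_WAKEUP analogue) */
}

int main(void)
{
    struct page_model m = { .dirty = false, .pmap_mod = true, .busy = true };
    pageout_complete_model(&m);
    printf("redirtied during clean: %s\n", m.dirty ? "yes" : "no");
    return 0;
}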
649 * Purpose: setup a page to be cleaned (made non-dirty), but not
650 * necessarily flushed from the VM page cache.
653 * The page must not be busy, and new_object
670 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
677 * Mark original page as cleaning in place.
684 * Convert the fictitious page to a private shadow of
685 * the real page.
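Per the fragments above, vm_pageclean_setup() pairs the real page with a fictitious page supplied by the caller: the original is marked as cleaning in place, and the fictitious page becomes a private shadow that borrows the same physical frame. A simplified model (invented struct, mirroring the field names in the fragments):

#include <stdbool.h>

struct vm_page_model {
    unsigned phys_page;   /* physical frame number            */
    bool fictitious;      /* page has no frame of its own     */
    bool private;         /* frame is borrowed, don't free it */
    bool cleaning;        /* being cleaned in place           */
    bool pageout;         /* owned by the pageout machinery   */
};

static void pageclean_setup_model(struct vm_page_model *m,
                                  struct vm_page_model *new_m)
{
    /* mark the original page as cleaning in place */
    m->cleaning = true;
    m->pageout  = true;

    /* convert the fictitious page to a private shadow of the
     * real page: it now references the same physical frame */
    new_m->fictitious = false;
    new_m->private    = true;
    new_m->pageout    = true;
    new_m->phys_page  = m->phys_page;
}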
706 * Causes the specified page to be initialized in
711 * The page is moved to a temporary object and paged out.
714 * The page in question must not be on any pageout queues.
716 * The page must be busy, but not hold a paging reference.
719 * Move this page to a completely new object.
730 "vm_pageout_initialize_page, page 0x%X\n",
735 * Verify that we really want to clean this page
756 * If there's no pager, then we can't clean the page. This should
770 * set the page for future call to vm_fault_list_request
809 * Given a page, queue it to the appropriate I/O thread,
810 * which will page it out and attempt to clean adjacent pages
813 * The page must be busy, and the object and queues locked. We will take a
818 * The page must not be on any pageout queue.
829 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
839 * Only a certain kind of page is appreciated here.
881 * A page is back from laundry or we are stealing it back from
885 * Object and page queues must be locked.
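vm_pageout_cluster() hands a busy page to a pageout I/O thread. A sketch of that hand-off under simplified, invented types: mark the page as laundry, append it to the thread's queue, and wake the thread:

#include <stdbool.h>
#include <stddef.h>

struct lpage {
    struct lpage *pageq_next;   /* linkage on the pageout queue */
    bool busy;                  /* caller has already set this  */
    bool laundry;               /* now owned by the I/O thread  */
};

struct pageout_queue_model {
    struct lpage *head, *tail;
    bool thread_awake;          /* stands in for thread_wakeup() */
};

/* caller holds the object and page-queue locks; m is busy */
static void pageout_cluster_model(struct pageout_queue_model *q,
                                  struct lpage *m)
{
    m->laundry    = true;
    m->pageq_next = NULL;
    if (q->tail != NULL)
        q->tail->pageq_next = m;
    else
        q->head = m;
    q->tail = m;
    q->thread_awake = true;     /* kick the pageout I/O thread */
}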
1106 * Page States: Used below to maintain the page state
1119 * If a "reusable" page somehow made it back into \
1326 * The page might be absent or busy,
1355 * the page queues lock
1430 * there is no one waiting for a page.
1450 * If the purge fails, fall through to reclaim a page instead.
1637 * blocked waiting for pages... we'll move one page for each of
1840 * Try a clean-queue inactive page, if we are still trying to fill the free list.
1873 * if we've gotten here, we have no victim page.
1918 * we just found this page on one of our queues...
1945 * the object associated with the candidate page is
1956 * page queues lock, we can only 'try' for this one.
1979 * m->object must be stable since we hold the page queues lock...
2029 * if this page has already been picked up as
2030 * part of a page-out cluster, it will be busy
2040 * A "busy" page should still be left alone for
2042 * not to process that page too much.
2049 * Somebody is already playing with this page.
2081 * we can reclaim the page... in the no longer alive case,
2082 * there are 2 states the page can be in that preclude us
2113 * remove page from object here since we're already
2142 * If the object is empty, the page must be reclaimed even
2144 * If the page belongs to a volatile object, we stick it back
2150 /* unmap the page */
2157 /* we saved the cost of cleaning this page ! */
2180 * A "busy" page should always be left alone, except...
2185 * We could get here with a "busy" page
2194 panic("\"busy\" page considered for pageout\n");
2202 * to make sure the page is unreferenced.
2218 * If already cleaning this page in place and it hasn't
2220 * We can leave the page mapped, and upl_commit_range
2229 * this page has been marked for destruction
2237 * and the UPL has already gathered this page...
2242 * page with m->pageout and still on the queues means that an
2243 * MS_INVALIDATE in progress on this page... leave it alone
2262 /* deal with a rogue "reusable" page */
2268 * The page we pulled off the inactive list has
2298 * The page was/is being used, so put back on active list.
2329 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
2333 * we've got a candidate page to steal...
2436 * we've got a page that we can steal...
2443 * page was still mapped up to the pmap_disconnect
2446 * we also check for the page being referenced 'late'
2449 * Note that if 'pmapped' is FALSE then the page is not
2452 * have been set in anticipation of likely usage of the page.
2462 /* If m->reference is already set, this page must have
2481 * since the last page was 'stolen'
2486 * If it's clean and not precious, we can free the page.
2499 /* page on clean queue used to be dirty; we should increment the vm_stat pageout count here */
2512 * we have met our free page target and this page wasn't just pulled
2519 * so that we don't just reactivate the page when we
2531 inactive_burst_count = 0; /* we found a usable page on the inactive queue, hooray */
2537 * OK, at this point we have found a page we are going to free.
2562 * The page may have been dirtied since the last check
2564 * if the page was clean then). With the dirty page
2579 * sure, we might need free pages, but this page is going to take time to become free
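Once a candidate has survived all the filters above, the decision at the heart of vm_pageout_scan() is small: a clean, non-precious page is freed on the spot, anything else goes to the laundry first. A model-only skeleton with invented names:

#include <stdbool.h>

struct scan_page_model {
    bool dirty, precious;
    bool reclaimed, laundered;  /* outcome flags for the model */
};

/* VM_PAGE_FREE() and vm_pageout_cluster() stand-ins */
static void page_free_model(struct scan_page_model *m)    { m->reclaimed = true; }
static void page_launder_model(struct scan_page_model *m) { m->laundered = true; }

static void scan_victim_model(struct scan_page_model *m)
{
    if (!m->dirty && !m->precious)
        page_free_model(m);     /* clean: reclaim immediately     */
    else
        page_launder_model(m);  /* dirty or precious: clean first */
}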
2722 panic("slid page %p not allowed on this path\n", m);
2731 * page is tabled in so that we can relookup this
2732 * page after we've taken the object lock - these
2733 * fields are stable while we hold the page queues lock
2735 * this page in this object... we hold an activity_in_progress
2769 * it's either the same page that someone else has
2772 * the page has been freed or we have found a
2773 * new page at this offset... in all of these cases
2775 * we took when we put the page on the pageout queue
2786 * If there is no memory object for the page, create
2799 * Reactivate the page.
2830 * so there is nowhere for the page to go.
2834 * Just free the page... VM_PAGE_FREE takes
2859 * we don't hold the page queue lock
2866 * took when we queued up this page and replace it
3378 * A page list structure, listing the physical pages
3389 * if a page list structure is present
3391 * page is not present, return a non-initialized
3394 * possible copies of the page. Leave pages busy
3395 * in the original object, if a page list structure
3396 * was specified. When a commit of the page list
3399 * If a page list structure is present, return
3400 * all mapped pages. Where a page does not exist
3402 * the original object. If a page list structure
3407 * page cache handling code, will never actually make a request
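The fragments around source line 3378 spell out the UPL contract: the caller may pass a page list structure describing each physical page in the range, and any page gathered into the list is left busy in the original object until the list is committed or aborted. A rough model of that shape (the real upl_t/upl_page_info_t types carry considerably more state):

#include <stdbool.h>

struct upl_entry_model {
    unsigned long phys;   /* physical frame, if present    */
    bool present;         /* absent pages report no frame  */
    bool dirty;           /* must be written before reuse  */
};

struct upl_model {
    int count;
    struct upl_entry_model *entries;  /* one per page in the range     */
    bool committed;  /* commit/abort clears busy on the source pages   */
};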
3629 * page is on inactive list and referenced...
3642 * if we were the page stolen by vm_pageout_scan to be
3645 * then we only need to check for the page being dirty or
3653 * this is a request for a PAGEOUT cluster and this page
3666 * the page... go on to the next one
3670 * if we get here, the page is not 'cleaning' (filtered out above).
3672 * so we don't pay the cost of an I/O to clean a page
3697 * page. We will have to wait.
3705 * The caller is gathering this page and might
3707 * page before adding it to the UPL, so that
3715 * mark page as busy while we decrypt it
3735 * we've buddied up a page for a clustered pageout
3748 * all the pages we will page out that
3778 * Mark original page as cleaning
3798 * Record that this page has been
3815 * We want to deny access to the target page
3821 * vm_pageout_scan() to demote that page
3824 * this page during its scanning while we're
3892 panic("need corner case for fictitious page");
3898 * page. We will have to wait.
3917 * physical page by asking the
3946 * need to allocate a page
4004 * The page is going to be encrypted when we
4010 * Otherwise, the page will not contain
4043 * Mark original page as cleaning
4085 * deny access to the target page while
4099 * expect the page to be used
4136 * we are working with a fresh page and we've
4143 * someone is explicitly grabbing this page...
4290 * page to be written out whose offset is beyond the
4681 panic("vm_upl_map: page missing\n");
4685 * Convert the fictitious page to a private
4686 * shadow of the real page.
4693 * since m is a page in the upl it must
4696 * page to the alias
4708 * The virtual page ("m") has to be wired in some way
4709 * here or its physical page ("m->phys_page") could
4712 * get an encrypted page here. Since the encryption
4713 * key depends on the VM page's "pager" object and
4716 * sharing the same physical page: we could end up
4717 * encrypting with one key (via one VM page) and
4718 * decrypting with another key (via the alias VM page).
4772 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
5031 * No page list to get the code-signing info from !?
5100 * This page is no longer dirty
5121 * for this page.
5146 * releasing the BUSY bit on this page
5165 * This page is no longer dirty
5187 * Occurs when the original page was wired
5218 * page was re-dirtied after we started
5234 * page has been successfully cleaned
5260 * between the vm page and the backing store
5277 /* this page used to be dirty; now it's on the clean queue. */
5284 * page coming back in from being 'frozen'...
5311 * Clear the "busy" bit on this page before we
5317 * Wakeup any thread waiting for the page to be un-cleaning.
5620 * This page was a part of a speculative
5623 * page and no one will clean up its
5633 * If the page was already encrypted,
5646 * this is an 'absent' page, but
5649 * stabilizing the page...
5676 * for this page.
5686 * of stabilizing the page...
5687 * we will either free the page
5716 * reference this page... for
6126 * If the page is encrypted, we need to decrypt it,
6127 * so force a soft page fault.
6138 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
6171 * we only get back an absent page if we
6185 * top-level placeholder page, if any.
6238 /* success but no page: fail */
6255 * Someone else is cleaning this page in place.
6257 * page but they'll probably end up clearing the "busy"
6282 * can't substitute if the page is already wired because
6299 * want anyone refaulting this page in and using
6301 * to find the new page being substituted.
6325 * vm_page_grablo returned the page marked
6337 * Mark the page "busy" to block any future page fault
6338 * on this page in addition to wiring it.
6346 * expect the page to be used
6347 * page queues lock must be held to set 'reference'
6382 * someone is explicitly grabbing this page...
6419 * page faults will block.
6421 * can't be accessed without causing a page fault.
6439 panic("vm_object_iopl_request: Wired page missing.\n");
6442 * if we've already processed this page in an earlier
6458 * vm_page_wire on this page
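Two independent mechanisms protect a page taken by vm_object_iopl_request(): the wire count keeps the frame resident, and the busy bit additionally stalls page faults until the I/O finishes. A minimal model of the take/release pairing, all names invented:

#include <stdbool.h>

struct iopl_page_model {
    unsigned wire_count;
    bool busy;
};

static void iopl_take_model(struct iopl_page_model *m)
{
    m->wire_count++;   /* vm_page_wire() analogue: stay resident */
    m->busy = true;    /* faulting threads now wait on the page  */
}

static void iopl_release_model(struct iopl_page_model *m)
{
    m->busy = false;   /* PAGE_WAKEUP_DONE analogue   */
    m->wire_count--;   /* vm_page_unwire() analogue   */
}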
6618 * data, so we have to actually remove the encrypted pages from the page
6620 * locate the virtual page in its page table and will trigger a page
6621 * fault. We can then decrypt the page and enter it in the page table
6622 * again. Whenever we allow the user to access the contents of a page,
6635 * a physical page.
6699 * The page should also be kept busy to prevent
6705 vm_page_t page,
6719 if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
6720 assert(page->busy);
6723 * and just enter the VM page in the kernel address space
6743 /* found a space to map our page ! */
6770 * map the physical page to that virtual address.
6778 page->pmapped = TRUE;
6782 * and the actual use of the page by the kernel,
6788 page,
6803 * addresses. Just map the page in the kernel
6847 * Enter the mapped pages in the page table now.
6852 * until after the kernel is done accessing the page(s).
6861 page = vm_page_lookup(object, offset + page_map_offset);
6862 if (page == VM_PAGE_NULL) {
6863 printf("vm_paging_map_object: no page !?");
6873 page->pmapped = TRUE;
6875 //assert(pmap_verify_free(page->phys_page));
6878 page,
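vm_paging_map_object() maps a page into a small window of kernel virtual addresses so routines like encryption can touch its contents. A toy model of the slot allocator implied by the fragments above ("found a space to map our page"); the slot count and names are invented:

#include <stdbool.h>

#define PAGING_SLOTS_MODEL 8    /* invented; the real window size differs */

struct paging_window_model {
    bool     in_use[PAGING_SLOTS_MODEL];
    unsigned phys[PAGING_SLOTS_MODEL];   /* PMAP_ENTER stand-in */
};

/* returns a slot index the caller turns into a kernel VA, or -1 */
static int paging_map_model(struct paging_window_model *w,
                            unsigned phys_page)
{
    for (int i = 0; i < PAGING_SLOTS_MODEL; i++) {
        if (!w->in_use[i]) {
            w->in_use[i] = true;    /* found a space to map our page */
            w->phys[i]   = phys_page;
            return i;
        }
    }
    return -1;  /* window full: the real code waits or maps elsewhere */
}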
6952 * have a different one for each page we encrypt, so that
7055 * Encrypt the given page, for secure paging.
7056 * The page might already be mapped at kernel virtual
7061 * The page's object is locked, but this lock will be released
7063 * The page is busy and not accessible by users (not entered in any pmap).
7067 vm_page_t page,
7085 assert(page->busy);
7087 if (page->encrypted) {
7094 assert(page->dirty || page->precious);
7096 ASSERT_PAGE_DECRYPTED(page);
7103 vm_object_paging_begin(page->object);
7107 * The page hasn't already been mapped in kernel space
7113 page,
7114 page->object,
7115 page->offset,
7121 "could not map page in kernel: 0x%x\n",
7137 * page to obfuscate the encrypted data a bit more and
7142 encrypt_iv.vm.pager_object = page->object->pager;
7144 page->object->paging_offset + page->offset;
7154 * Encrypt the page.
7165 * Unmap the page from the kernel's address space,
7170 vm_paging_unmap_object(page->object,
7179 * The page was kept busy and disconnected from all pmaps,
7185 pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
7187 page->encrypted = TRUE;
7189 vm_object_paging_end(page->object);
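Putting the vm_page_encrypt() fragments together: skip pages already encrypted, derive an initialization vector from the pager object and paging offset (so every page gets a different one), encrypt in place through the kernel mapping, then mark the page encrypted. A model with a toy XOR cipher standing in for the real one; all names here are invented:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SZ_MODEL 4096

struct epage {
    uint8_t   data[PAGE_SZ_MODEL];
    bool      busy, encrypted, dirty;
    uintptr_t pager_object;    /* identifies the backing pager */
    uint64_t  paging_offset;   /* page's offset within it      */
};

/* toy symmetric cipher: XOR with an IV-derived keystream */
static void toy_crypt(uint8_t *buf, uint64_t iv)
{
    for (int i = 0; i < PAGE_SZ_MODEL; i++)
        buf[i] ^= (uint8_t)(iv >> (8 * (i & 7)));
}

static void page_encrypt_model(struct epage *m)
{
    if (m->encrypted)       /* already encrypted: nothing to do */
        return;
    /* the IV depends on the pager object and the paging offset,
     * so each page is encrypted with a different one */
    uint64_t iv = (uint64_t)m->pager_object ^ m->paging_offset;
    toy_crypt(m->data, iv); /* in place, via the kernel mapping */
    m->encrypted = true;    /* refmod bits would be cleared here */
}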
7195 * Decrypt the given page.
7196 * The page might already be mapped at kernel virtual
7201 * The page's VM object is locked but will be unlocked and relocked.
7202 * The page is busy and not accessible by users (not entered in any pmap).
7206 vm_page_t page,
7221 assert(page->busy);
7222 assert(page->encrypted);
7224 was_dirty = page->dirty;
7231 vm_object_paging_begin(page->object);
7235 * The page hasn't already been mapped in kernel space
7241 page,
7242 page->object,
7243 page->offset,
7249 "could not map page in kernel: 0x%x\n",
7262 * used to encrypt that page.
7265 decrypt_iv.vm.pager_object = page->object->pager;
7267 page->object->paging_offset + page->offset;
7277 * Decrypt the page.
7287 * Unmap the page from the kernel's address space,
7292 vm_paging_unmap_object(page->object,
7299 * The pager did not specify that the page would be
7305 * After decryption, the page is actually still clean.
7311 page->dirty = FALSE;
7312 assert(page->cs_validated == FALSE);
7313 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
7315 page->encrypted = FALSE;
7318 * We've just modified the page's contents via the data cache and part
7320 * Since the page is now available and might get gathered in a UPL to
7324 pmap_sync_page_attributes_phys(page->phys_page);
7326 * Since the page is not mapped yet, some code might assume that it
7328 * that page. That code relies on "pmapped" being FALSE, so that the
7329 * caches get synchronized when the page is first mapped.
7331 assert(pmap_verify_free(page->phys_page));
7332 page->pmapped = FALSE;
7333 page->wpmapped = FALSE;
7335 vm_object_paging_end(page->object);
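vm_page_decrypt() is the mirror image, continuing the sketch above (reusing struct epage and toy_crypt()): decrypt in place, then keep the page clean unless it was dirty before decryption started, since decryption by itself should not force another pageout:

static void page_decrypt_model(struct epage *m)
{
    bool was_dirty = m->dirty;   /* remember pre-decryption state */
    uint64_t iv = (uint64_t)m->pager_object ^ m->paging_offset;
    toy_crypt(m->data, iv);      /* symmetric: same toy cipher    */
    m->encrypted = false;
    /* decryption alone does not dirty the page: keep it clean so
     * it is not pointlessly paged out again, unless it was dirty
     * before we started */
    m->dirty = was_dirty;
}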
7360 vm_page_t page;
7427 page = vm_page_lookup(shadow_object,
7429 if (page == VM_PAGE_NULL) {
7431 "no page for (obj=%p,off=0x%llx+0x%x)!\n",
7437 * Disconnect the page from all pmaps, so that nobody can
7439 * accesses to this page will cause a page fault and block
7440 * while the page is busy being encrypted. After the
7442 * page fault and the page gets decrypted at that time.
7444 pmap_disconnect(page->phys_page);
7445 vm_page_encrypt(page, 0);
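The loop in the fragments above disconnects each page from all pmaps before encrypting it, so any user access faults and blocks on the busy page rather than reading ciphertext. A skeleton reusing page_encrypt_model() from the earlier sketch:

static void object_encrypt_range_model(struct epage *pages, int npages)
{
    for (int i = 0; i < npages; i++) {
        /* pmap_disconnect() analogue: with all mappings revoked,
         * any user access now faults and blocks on the busy bit */
        pages[i].busy = true;
        page_encrypt_model(&pages[i]);
        pages[i].busy = false;   /* PAGE_WAKEUP_DONE analogue */
    }
}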
7478 __unused vm_page_t page,
7485 __unused vm_page_t page,
7493 * page->object must be locked
7496 vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
7510 vm_pageout_throttle_up(page);
7873 /* make sure our page belongs to the one object allowed to do this */
7893 vm_page_t page,
7901 assert(!page->slid);
7903 if (page->error)
7911 vm_object_paging_begin(page->object);
7915 * The page hasn't already been mapped in kernel space
7921 page,
7922 page->object,
7923 page->offset,
7929 "could not map page in kernel: 0x%x\n",
7938 * Slide the pointers on the page.
7941 /*assert that slide_file_info.start/end are page-aligned?*/
7943 pageIndex = (uint32_t)((page->offset - slide_info.start)/PAGE_SIZE);
7948 * Unmap the page from the kernel's address space,
7951 vm_paging_unmap_object(page->object,
7956 page->dirty = FALSE;
7957 pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
7962 page,
7963 page->object, page->offset,
7964 page->object->pager,
7965 page->offset + page->object->paging_offset);
7969 page->slid = TRUE;
7971 page->error = TRUE;
7975 vm_object_paging_end(page->object);
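Finally, the vm_page_slide() fragments reduce to: map the page into the kernel, apply the pointer fixups for its index within the slid region, and mark the page slid and clean on success, or error on failure. A model; apply_fixups_model() is a hypothetical stand-in for the real relocation routine:

#include <stdbool.h>
#include <stdint.h>

#define SLIDE_PAGE_SZ 4096

struct slide_page_model {
    uint8_t *data;       /* kernel mapping of the page */
    uint64_t offset;     /* offset within the object   */
    bool slid, dirty, error;
};

static bool apply_fixups_model(uint8_t *data, uint32_t page_index)
{
    (void)data; (void)page_index;
    return true;  /* pretend the relocations applied cleanly */
}

static bool page_slide_model(struct slide_page_model *m,
                             uint64_t slide_start)
{
    uint32_t page_index =
        (uint32_t)((m->offset - slide_start) / SLIDE_PAGE_SZ);
    if (!apply_fixups_model(m->data, page_index)) {
        m->error = true;   /* leave the page marked unusable */
        return false;
    }
    m->slid  = true;
    m->dirty = false;  /* fixups are reproducible; keep the page clean */
    return true;
}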