Lines Matching refs:page in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

138 				vm_page_t	page,
142 vm_page_t page);
362 * so, compute a potential page to deactivate and
367 * return TRUE if we actually deactivate a page
448 * prevent us from creating a ZF page...
493 * treat this as if we couldn't grab a page
508 * do the work to zero fill a page and
512 * page queue lock must NOT be held
520 * This is a zero-fill page fault...
522 * Checking the page lock is a waste of
523 * time; this page was absent, so
524 * it can't be page locked by a pager.
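
Lines 508-524 document the zero-fill helper's contract: the page's object is locked, the page queue lock must NOT be held, and since the page was absent no pager can hold it locked. A minimal sketch of that contract; the helper name and body are illustrative, while vm_page_zero_fill() and the queue calls are identifiers from this source tree:

    /*
     * Sketch: zero fill one page and put it on a paging queue.
     * Caller holds m's object lock; page queue lock must NOT be held.
     */
    static void
    zero_fill_page_sketch(vm_page_t m)
    {
            /*
             * This page was absent (line 523), so it can't be page
             * locked by a pager: no need to check before zeroing.
             */
            vm_page_zero_fill(m);

            vm_page_lock_queues();   /* only now take the queue lock */
            vm_page_activate(m);     /* put the page on a paging queue */
            vm_page_unlock_queues();
    }
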
575 * Find the resident page for the virtual memory
579 * The required permissions for the page are given
586 * If the desired page is known to be resident (for
595 * The page containing the proper data is returned
606 * If this is not the original object, a busy page in the
621 boolean_t must_be_resident,/* Must page be resident? */
631 kern_return_t *error_code, /* code if page is in error */
636 * page is provided */
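
Lines 575-636 are the header comment and part of the parameter list of vm_fault_page(). Pulling the visible pieces together, its contract reads roughly as below; the real signature has more parameters than the listing shows:

    vm_fault_return_t
    vm_fault_page(
            vm_object_t        first_object,     /* object at fault time */
            vm_object_offset_t first_offset,     /* offset of interest */
            vm_prot_t          fault_type,       /* required permissions (579) */
            boolean_t          must_be_resident, /* fail rather than page in (621) */
            /* ... further in/out parameters elided ... */
            vm_page_t          *result_page,     /* busy page with proper data (595) */
            vm_page_t          *top_page,        /* busy placeholder in the first
                                                  * object when the data came from
                                                  * a shadow object (606) */
            kern_return_t      *error_code);     /* code if page is in error (631) */
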
662 * MACH page map - an optional optimization where a bit map is maintained
669 * 'known' that the page does not exist on backing store.
671 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
682 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
684 * PAGED_OUT() is used to determine if a page has already been pushed
685 * into a copy object in order to avoid a redundant page out operation.
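
Lines 662-685 describe the optional MACH page map and its two predicates. A hedged reconstruction, assuming they are built on the vm_external interface (VM_EXTERNAL_STATE_ABSENT / VM_EXTERNAL_STATE_EXISTS); the exact definitions may differ:

    /* ask the pager unless the map says the page is known-absent (669-671) */
    #define MUST_ASK_PAGER_SKETCH(o, f)                             \
            (vm_external_state_get((o)->existence_map, (f))         \
             != VM_EXTERNAL_STATE_ABSENT)

    /* TRUE if the page was already pushed to backing store (682-685) */
    #define PAGED_OUT_SKETCH(o, f)                                  \
            (vm_external_state_get((o)->existence_map, (f))         \
             == VM_EXTERNAL_STATE_EXISTS)
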
749 * lock or a busy page in some object to prevent
751 * the same page.
755 * we use a busy page then.
758 * shadow chain and entering a new page in the top
759 * object before we do, we must keep a busy page in
763 * for which we have a busy page before dropping
767 * If the pageout daemon comes across a busy page,
768 * it will remove the page from the pageout queues.
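
Lines 749-768 state this file's core synchronization rule: a thread must hold either the object lock or a busy page to keep other threads from faulting on the same page, and it must own a busy page in the top object before walking down the shadow chain; the pageout daemon removes a busy page from the pageout queues (767-768), so the page won't be stolen while we work. A minimal sketch of the claim/release idiom, assuming the vm_page_lookup()/PAGE_WAKEUP_DONE() primitives that appear elsewhere in this listing:

    vm_object_lock(object);
    m = vm_page_lookup(object, offset);
    if (m != VM_PAGE_NULL && !m->busy) {
            m->busy = TRUE;           /* other faulters must now wait */
            vm_object_unlock(object); /* safe: the busy page pins our claim */

            /* ... long-running work: pagein, copy, decrypt ... */

            vm_object_lock(object);
            PAGE_WAKEUP_DONE(m);      /* clear busy, wake any waiters */
    }
    vm_object_unlock(object);
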
802 * See whether the page at 'offset' is resident
812 * The page is being brought in,
815 * A possible optimization: if the page
825 "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
844 * Guard page: off limits!
849 * access to the guard page, so it must
863 * guard page: let's deny that!
873 * The page is in error, give up now.
892 * page to another pager, then do so.
906 * The page isn't busy, but is absent,
909 * Remove the non-existent page (unless it's
920 * Absent page at bottom of shadow
921 * chain; zero fill the page we left
923 * the absent page.
929 * us from creating a new zero-fill page
940 "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
947 * free the absent page we just found
958 * grab the original page we
971 * we're going to use the absent page we just found
972 * so convert it to a 'busy' page
978 * zero-fill the page and put it on
1028 * cause us to revoke access to this page, but
1029 * this page is in the process of being cleaned
1032 * revoking access to the original page,
1040 "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
1093 * the user needs access to a page that we
1095 * Decrypt the page now.
1117 * We just paged in a page from a signed
1121 * space for the first time or when the page
1128 * We mark the page busy and leave it on
1131 * remove the page from the queue, but not the object
1137 "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
1148 * we get here when there is no page present in the object at
1149 * the offset we're interested in... we'll allocate a page
1161 * Allocate a new page for this object/offset pair
1223 * If there are too many outstanding page
1262 * Indicate that the page is waiting for data
1288 * We have an absent page in place for the faulting offset,
1311 "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
1348 * No page here means that the object we
1357 * page fault against the object's new backing
1371 * be in a different page (i.e., m is meaningless at
1379 * exists and indicates the page isn't present on the pager
1380 * or we're unwiring a page. If a pager exists, but there
1382 * the ZF case when the pager can't provide the page
1393 "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
1402 * fill the page in the top object with zeros.
1420 * us from creating a new zero-fill page
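
Lines 1148-1420 trace the pagein path taken when no page is resident at the offset of interest: allocate a fresh page, mark it absent and busy so it is "waiting for data", ask the pager for the contents, and zero fill instead when there is no pager or the pager can't provide the page. A condensed sketch; memory_object_data_request() is the real external-pager entry point, but its argument list here and the surrounding control flow are assumptions:

    static kern_return_t
    pagein_sketch(vm_object_t object, vm_object_offset_t offset,
                  vm_prot_t fault_type)
    {
            vm_page_t m;

            m = vm_page_grab();     /* allocate a new page (line 1161) */
            if (m == VM_PAGE_NULL)
                    return KERN_RESOURCE_SHORTAGE;
            vm_page_insert(m, object, offset);

            if (object->pager_created) {
                    m->absent = TRUE; /* page is waiting for data (1262) */
                    m->busy = TRUE;
                    vm_object_unlock(object);
                    /* "data_req" trace point at line 1311 */
                    return memory_object_data_request(object->pager,
                                    offset + object->paging_offset,
                                    PAGE_SIZE, fault_type);
            }
            /* no pager: fill the page in the top object with zeros (1402) */
            vm_page_zero_fill(m);
            return KERN_SUCCESS;
    }
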
1469 * This page (m) is:
1474 * The top-level page (first_m) is:
1475 * VM_PAGE_NULL if the page was found in the
1498 * If we found a page, we must have decrypted it before we
1506 "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
1511 * If the page is being written, but isn't
1513 * we have to copy it into a new page owned
1554 * unlock the bottom object, because the page
1557 * Instead, we first copy the page. Then, when
1561 * Note that we copy the page even if we didn't
1566 * Allocate a page for the copy
1587 * page with us, we have to flush all
1588 * uses of the original page, since we
1594 * access to this page, then we could
1603 * We no longer need the old page or object.
1620 * page that we soldered in earlier
1627 * page we just copied into
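
Lines 1511-1627 describe the classic copy-on-write resolution: the faulting write hit a page that lives below the top object, so the data is copied into a new page owned by the top object, and all pmap uses of the original are flushed first so no stale write mapping survives. A sketch of that step; the bookkeeping around it is elided:

    copy_m = vm_page_grab();   /* allocate a page for the copy (1566) */
    if (copy_m == VM_PAGE_NULL)
            return VM_FAULT_MEMORY_SHORTAGE;

    vm_page_copy(m, copy_m);   /* copy even if not strictly needed (1561) */

    /*
     * Others may share the original page: flush all uses of it
     * before anyone regains write access (lines 1587-1594).
     */
    pmap_disconnect(m->phys_page);

    PAGE_WAKEUP_DONE(m);       /* old page no longer needed (1603) */
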
1648 * Now check whether the page needs to be pushed into the
1651 * satisfy the fault; one above to get the page from a
1664 * If the page is being written, but hasn't been
1673 * If the page was guaranteed to be resident,
1702 * Does the page exist in the copy?
1708 * Copy object doesn't cover this page -- do nothing.
1717 * If the page is being brought
1739 * it's OK if the "copy_m" page is encrypted,
1762 * If PAGED_OUT is TRUE, then the page used to exist
1768 * We must copy the page to the copy object.
1795 * Allocate a page for the copy
1814 * Must copy page into copy-object.
1819 * If the old page was in use by any users
1829 * page out this page, using the "initialize"
1856 * The page is already ready for pageout:
1864 * Write the page to the copy-object,
1892 * Because we're pushing a page upward
1897 * wait result]. Can't turn off the page's
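
Lines 1648-1897 cover the second copy decision: once the fault itself is satisfied, a written page may still have to be pushed up into the object's copy object so that a delayed copy doesn't observe the new data; PAGED_OUT() (line 1762) short-circuits the push when the copy already holds the old contents. A sketch of the push; the control flow around it is an assumption:

    copy_m = vm_page_alloc(copy_object, copy_offset); /* line 1795 */
    if (copy_m == VM_PAGE_NULL)
            return VM_FAULT_MEMORY_SHORTAGE;

    vm_page_copy(m, copy_m);   /* must copy page into copy-object (1814) */

    /*
     * The old page may be in use by other users: flush its pmap
     * mappings before they write it again (lines 1819-1829).
     */
    pmap_disconnect(m->phys_page);
    copy_m->dirty = TRUE;      /* paged out later via "initialize" (1829) */
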
1977 * When soft faulting a page, we have to validate the page if:
1978 * 1. the page is being mapped in user space
1979 * 2. the page hasn't already been found to be "tainted"
1980 * 3. the page belongs to a code-signed object
1981 * 4. the page has not been validated yet or has been mapped for write.
1983 #define VM_FAULT_NEED_CS_VALIDATION(pmap, page) \
1985 !(page)->cs_tainted /*2*/ && \
1986 (page)->object->code_signed /*3*/ && \
1987 (!(page)->cs_validated || (page)->wpmapped /*4*/))
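
The macro body above is missing its first condition: source line 1984 doesn't contain the search term "page", so the listing skips it. Matching condition 1 ("the page is being mapped in user space"), a plausible reconstruction is a kernel_pmap test; that first line is an assumption:

    #define VM_FAULT_NEED_CS_VALIDATION(pmap, page)                 \
            ((pmap) != kernel_pmap /*1*/ &&     /* assumed line */  \
             !(page)->cs_tainted /*2*/ &&                           \
             (page)->object->code_signed /*3*/ &&                   \
             (!(page)->cs_validated || (page)->wpmapped /*4*/))
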
1991 * page queue lock must NOT be held
2029 * This is the first time this page is being
2032 * Part of that page may still be in the data cache
2034 * accessing that page via the instruction cache,
2042 * is the first fault-in of the page (m->pmapped == FALSE)
2083 if (m->cs_tainted /* always invalidate a tainted page */
2086 * Code Signing enforcement invalidates an executable page that
2094 * This page has been tainted and can not be trusted.
2096 * necessary precautions before we enter the tainted page
2104 /* reject the tainted page: abort the page fault */
2108 /* proceed with the tainted page */
2117 "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
2121 /* proceed with the valid page */
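
Lines 2083-2121 are the enforcement decision: a tainted page is always invalidated, policy then decides whether the fault aborts or proceeds, and a page that fails validation logs the "*** INVALID PAGE ***" message before proceeding. A hedged outline of the branch; the policy flag name and the return code are assumptions:

    if (m->cs_tainted) {    /* always invalidate a tainted page (2083) */
            /*
             * This page has been tainted and can not be trusted (2094);
             * take the necessary precautions before entering it (2096).
             */
            if (cs_enforcement /* assumed policy flag */) {
                    /* reject the tainted page: abort the page fault (2104) */
                    return KERN_CODESIGN_ERROR;
            }
            /* proceed with the tainted page (2108) */
    }
    /* proceed with the valid page (2121) */
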
2146 * the page queues. Change wiring
2170 * test again now that we hold the page queue lock
2175 * If this is a no_cache mapping and the page has never been
2176 * mapped before or was previously a no_cache page, then we
2179 * the page is activated as normal.
2207 * Handle page faults, including pseudo-faults
2240 vm_page_t top_page; /* Placeholder page */
2294 * assume we will hit a page in the cache
2322 * If the page is wired, we must fault for the current protection
2369 * - Have to push page into copy object.
2385 * If this page is to be inserted in a copy delay object
2387 * copy delay strategy is implemented in the slow fault path (vm_fault_page).
2418 * the top object lock associated with this page
2436 * exclusively and go relookup the page since we
2439 * a page at this offset
2469 * Guard page: let the slow path deal with it
2482 * We've soft-faulted (because it's not in the page
2483 * table) on an encrypted page.
2484 * Keep the page "busy" so that no one messes with
2487 * the page's VM object lock.
2503 * the top object lock associated with this page
2521 * exclusively and go relookup the page since we
2524 * a page at this offset
2555 * We might need to validate this page
2580 * exclusively and go relookup the page since we
2583 * a page at this offset
2609 * the page for now and we'll
2633 * the read fault, and the copied page's
2636 * the original page.
2644 * switch to the object that has the new page
2688 * address will either find our page
2689 * and translation or put in a new page
2746 * Allocate a page in the original top level
2748 * need to remember current page, as it's the
2755 * the page has been copied and inserted
2762 * no free page currently available...
2768 * Now do the copy. Mark the source page busy...
2771 * the page copy.
2778 * Now cope with the source page and object
2789 * copied a page is most probably backed
2825 * No page at cur_object, cur_offset... m == NULL
2837 * that the pager doesn't have this page
2904 * a page at this offset
2917 * no free page currently available...
2924 * Now zero fill page...
2925 * the page is probably going to
3064 * What to do with the resulting page from vm_fault_page
3097 * do not try for write permission. If the page is
3198 * If we want to wire down this page, but no longer have
3220 * Put this page into the physical map.
3222 * may cause other faults. The page may be on
3224 * across the page, it will remove it from the queues.
3246 /* abort this page fault */
3414 * page tables and such can be locked down as well.
3421 * We simulate a fault to get the page and enter it
3531 * unmounted, then we won't get a page back from vm_fault_page(). Just
3568 * of addresses may fault, so that page tables and
3580 * Handle common case of a wire down page fault at the given address.
3581 * If successful, the page is inserted into the associated physical map.
3585 * proper page address.
3587 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
3679 * lock or a busy page in some object to prevent
3681 * the same page.
3683 * 2) Once we have a busy page, we must remove it from
3690 * Look for page in top-level object. If it's not there or
3693 * decrypt the page before wiring it down.
3714 * Wire the page down now. All bail outs beyond this
3715 * point must unwire the page.
3723 * Mark page busy for other threads.
3730 * Give up if the page is being written and there's a copy object
3738 * Put this page into the physical map.
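
Lines 3679-3738 enumerate the fast wire-down path: find the page in the top-level object, wire it (after which every bail-out must unwire), mark it busy, give up if a write would first require a copy-object push, then enter it in the physical map. A condensed sketch; the control flow and the PMAP_ENTER argument order are assumptions:

    m = vm_page_lookup(object, offset);
    if (m == VM_PAGE_NULL || m->busy || m->absent)
            return KERN_FAILURE;            /* take the slow path instead */

    vm_page_lock_queues();
    vm_page_wire(m);                        /* bail-outs must unwire (3715) */
    vm_page_unlock_queues();

    m->busy = TRUE;                         /* mark busy for others (3723) */

    if ((fault_type & VM_PROT_WRITE) && object->copy != VM_OBJECT_NULL) {
            /* give up: writing would push into the copy object (3730) */
            vm_page_lock_queues();
            vm_page_unwire(m);
            vm_page_unlock_queues();
            PAGE_WAKEUP_DONE(m);
            return KERN_FAILURE;
    }
    /* put this page into the physical map (3738) */
    PMAP_ENTER(pmap, va, m, prot, 0, TRUE);
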
3765 * Release a page used by vm_fault_copy.
3770 vm_page_t page,
3773 vm_object_t object = page->object;
3776 PAGE_WAKEUP_DONE(page);
3778 if (!page->active && !page->inactive && !page->throttled)
3779 vm_page_activate(page);
3786 vm_page_t page)
3790 if (page != VM_PAGE_NULL) {
3791 object = page->object;
3794 vm_page_unwire(page);
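
Lines 3765-3794 are fragments of the two small cleanup helpers used by vm_fault_copy(). Stitching the visible lines together gives roughly the following; the lock placement and the vm_fault_cleanup()/vm_object_paging_end() calls are assumptions:

    static void
    vm_fault_copy_cleanup(vm_page_t page, vm_page_t top_page)
    {
            vm_object_t object = page->object;      /* line 3773 */

            vm_object_lock(object);
            PAGE_WAKEUP_DONE(page);                 /* line 3776 */
            vm_page_lock_queues();
            if (!page->active && !page->inactive && !page->throttled)
                    vm_page_activate(page);         /* lines 3778-3779 */
            vm_page_unlock_queues();
            vm_fault_cleanup(object, top_page);
    }

    static void
    vm_fault_copy_dst_cleanup(vm_page_t page)       /* name assumed */
    {
            vm_object_t object;

            if (page != VM_PAGE_NULL) {             /* line 3790 */
                    object = page->object;          /* line 3791 */
                    vm_object_lock(object);
                    vm_page_lock_queues();
                    vm_page_unwire(page);           /* line 3794 */
                    vm_page_unlock_queues();
                    vm_object_paging_end(object);
                    vm_object_unlock(object);
            }
    }
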
3808 * Before actually copying a page, the version associated with
3859 * the different offsets on a page boundary.
3888 * start by getting the destination page in order to apply
3933 * destination page are the same. But we can't
3936 * destination page will deadlock. To prevent this we
3937 * wire the page so we can drop busy without having
3938 * the page daemon steal the page. We clean up the
3939 * top page but keep the paging reference on the object
3940 * holding the dest page so it doesn't go away.
3961 * zero-fill the page in dst_object.
4037 * Copy the page, and note that it is dirty
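
Line 4037's "Copy the page, and note that it is dirty" boils down to two operations. A sketch, assuming vm_page_copy() (a standard VM page primitive) and illustrative variable names:

    vm_page_copy(result_page, dst_page);    /* copy the page ...            */
    dst_page->dirty = TRUE;                  /* ... and note that it's dirty */
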
4200 vm_page_t page,
4210 assert(page->busy);
4211 vm_object_lock_assert_exclusive(page->object);
4217 if (page->wpmapped && !page->cs_tainted) {
4219 * This page was mapped for "write" access sometime in the
4222 * [ If the page was already found to be "tainted", no
4225 page->cs_validated = TRUE;
4226 page->cs_tainted = TRUE;
4229 "page %p obj %p off 0x%llx "
4231 page, page->object, page->offset);
4236 if (page->cs_validated) {
4242 object = page->object;
4244 offset = page->offset;
4254 * Since we get here to validate a page that was brought in by
4269 /* verify the SHA1 hash for this page */
4275 page->cs_validated = validated;
4277 page->cs_tainted = tainted;
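
Lines 4200-4277 outline vm_page_validate_cs_mapped(): a page that was ever mapped for write and isn't yet tainted is conservatively marked both validated and tainted; otherwise its SHA1 hash is checked and the result recorded. A reconstruction of that control flow; the hash-check helper cs_hash_ok() is hypothetical, standing in for the real call:

    void
    vm_page_validate_cs_mapped(vm_page_t page, const void *kaddr)
    {
            boolean_t validated, tainted;

            assert(page->busy);                             /* line 4210 */
            vm_object_lock_assert_exclusive(page->object);  /* line 4211 */

            if (page->wpmapped && !page->cs_tainted) {      /* line 4217 */
                    /*
                     * Mapped for "write" access sometime in the past:
                     * the contents can no longer be trusted (4219-4226).
                     */
                    page->cs_validated = TRUE;
                    page->cs_tainted = TRUE;
                    return;
            }
            if (page->cs_validated)
                    return;                 /* already validated (4236) */

            /* verify the SHA1 hash for this page (4269) */
            validated = cs_hash_ok(page->object, page->offset, kaddr,
                                   &tainted);   /* hypothetical helper */

            page->cs_validated = validated;         /* line 4275 */
            page->cs_tainted = tainted;             /* line 4277 */
    }
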
4283 vm_page_t page)
4293 vm_object_lock_assert_held(page->object);
4299 if (page->wpmapped && !page->cs_tainted) {
4300 vm_object_lock_assert_exclusive(page->object);
4303 * This page was mapped for "write" access sometime in the
4306 * [ If the page was already found to be "tainted", no
4309 page->cs_validated = TRUE;
4310 page->cs_tainted = TRUE;
4313 "page %p obj %p off 0x%llx "
4315 page, page->object, page->offset);
4320 if (page->cs_validated) {
4324 vm_object_lock_assert_exclusive(page->object);
4326 object = page->object;
4328 offset = page->offset;
4330 busy_page = page->busy;
4332 /* keep page busy while we map (and unlock) the VM object */
4333 page->busy = TRUE;
4343 /* map the page in the kernel address space */
4347 page,
4354 panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
4358 /* validate the mapped page */
4359 vm_page_validate_cs_mapped(page, (const void *) kaddr);
4361 assert(page->busy);
4362 assert(object == page->object);
4366 PAGE_WAKEUP_DONE(page);
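
Lines 4283-4366 show the wrapper that gets an unmapped page validated: keep the page busy so nothing changes underneath us, map it into the kernel address space (panicking if that fails, line 4354), run vm_page_validate_cs_mapped() on the mapping, then wake any waiters. A condensed sketch; the vm_paging_map_object() prototype below is an assumption made for this sketch:

    /* assumed prototype, for this sketch only */
    extern kern_return_t vm_paging_map_object(vm_map_offset_t *kaddr,
        vm_page_t page, vm_object_t object, vm_object_offset_t offset);

    void
    vm_page_validate_cs(vm_page_t page)
    {
            vm_object_t     object;
            vm_map_offset_t kaddr;
            boolean_t       busy_page;
            kern_return_t   kr;

            vm_object_lock_assert_held(page->object);       /* line 4293 */

            object = page->object;
            busy_page = page->busy;                         /* line 4330 */
            if (!busy_page)
                    page->busy = TRUE;  /* keep busy while we map (4332-4333) */

            /* map the page in the kernel address space (4343-4347) */
            kr = vm_paging_map_object(&kaddr, page, object, page->offset);
            if (kr != KERN_SUCCESS)
                    panic("vm_page_validate_cs: could not map page: 0x%x\n",
                          kr);

            /* validate the mapped page (4358-4359) */
            vm_page_validate_cs_mapped(page, (const void *) kaddr);

            assert(page->busy);                             /* line 4361 */
            assert(object == page->object);                 /* line 4362 */
            if (!busy_page)
                    PAGE_WAKEUP_DONE(page);                 /* line 4366 */
    }
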