
Lines matching refs: object (in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/)

112  * an object at least this big.
145 extern void vm_fault_classify(vm_object_t object,
187 * The paging reference for "object" is released.
188 * "object" is unlocked.
190 * freed and the paging reference for the object
194 * "object" must be locked.
198 register vm_object_t object,
201 vm_object_paging_end(object);
202 vm_object_unlock(object);
205 object = top_page->object;
207 vm_object_lock(object);
209 vm_object_paging_end(object);
210 vm_object_unlock(object);
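The hits at lines 187-210 above are pieces of vm_fault_cleanup(). A hedged reconstruction of how those pieces fit together follows; it tracks the fragments shown here, but treat it as a sketch rather than a verbatim copy (the VM_PAGE_FREE() of the top page is inferred, since that line does not mention "object" and so cannot appear in the listing):

    void
    vm_fault_cleanup(
            vm_object_t     object,         /* must be locked by the caller */
            vm_page_t       top_page)       /* busy page in the top object, or VM_PAGE_NULL */
    {
            /* drop the paging reference and lock on the current object */
            vm_object_paging_end(object);
            vm_object_unlock(object);

            if (top_page != VM_PAGE_NULL) {
                    /* the busy page that pinned the top object is no longer needed */
                    object = top_page->object;

                    vm_object_lock(object);
                    VM_PAGE_FREE(top_page);
                    vm_object_paging_end(object);
                    vm_object_unlock(object);
            }
    }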
251 * object must have at least the shared lock held
256 vm_object_t object,
264 last_alloc = object->last_alloc;
265 sequential = object->sequential;
334 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
336 * if someone else has already updated object->sequential
337 * don't bother trying to update it or object->last_alloc
347 * one thread is banging on this object, no problem with the unprotected
353 object->last_alloc = offset;
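Lines 251-353 are vm_fault_is_sequential(), which runs with only the shared object lock held: object->sequential is updated with a single OSCompareAndSwap() attempt, and object->last_alloc is written unprotected on the theory that a lost update merely costs a missed sequential run. A small self-contained user-space analogue of that pattern (C11 atomics stand in for the kernel's OSCompareAndSwap; the struct and names are illustrative, not from xnu):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the fields vm_fault_is_sequential() updates. */
    struct tracker {
            _Atomic int32_t sequential;     /* length of the current sequential run */
            uint64_t        last_alloc;     /* last offset faulted; deliberately unprotected */
    };

    /* Record a fault at "offset": extend the run if it follows last_alloc,
     * otherwise reset.  Only one CAS attempt is made; if another thread
     * raced us we simply keep its value, mirroring the "don't bother
     * trying to update it" comment at lines 336-337 above. */
    static void note_fault(struct tracker *t, uint64_t offset, uint64_t page_size)
    {
            int32_t orig = atomic_load(&t->sequential);
            int32_t next = (offset == t->last_alloc + page_size) ? orig + 1 : 0;

            if (atomic_compare_exchange_strong(&t->sequential, &orig, next)) {
                    /* benign race: a stale last_alloc only costs a missed run */
                    t->last_alloc = offset;
            }
    }

    int main(void)
    {
            struct tracker t = { 0, 0 };
            for (uint64_t off = 4096; off <= 5 * 4096; off += 4096)
                    note_fault(&t, off, 4096);
            printf("sequential run length: %d\n", atomic_load(&t.sequential));
            return 0;
    }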
365 * object must be locked.
372 vm_object_t object,
381 dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
384 if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
386 * Do not deactivate pages from the kernel object: they
392 if ((sequential_run = object->sequential)) {
405 m = vm_page_lookup(object, offset - PAGE_SIZE_64);
409 m = vm_page_lookup(object, offset + PAGE_SIZE_64);
417 * long enough on an object with default access behavior
423 m = vm_page_lookup(object, offset - behind);
426 m = vm_page_lookup(object, offset + behind);
437 dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
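Lines 365-437 are vm_fault_deactivate_behind(): when the object shows a sequential run, the page just behind the faulting offset (or just ahead of it, for a backward run) is looked up and deactivated so a streaming reader does not monopolize the active queue. A minimal sketch of the forward case, using the interfaces named in the matched lines (the busy check and the queue locking are assumptions about the surrounding code, not visible in the listing):

    static void
    deactivate_behind_sketch(vm_object_t object, vm_object_offset_t offset)
    {
            vm_page_t m;

            /* never deactivate pages owned by the kernel object (line 384) */
            if (object == kernel_object || vm_page_deactivate_behind == FALSE)
                    return;

            if (object->sequential) {
                    /* forward run: the page we just passed is a good eviction candidate */
                    m = vm_page_lookup(object, offset - PAGE_SIZE_64);
                    if (m != VM_PAGE_NULL && !m->busy) {
                            vm_page_lock_queues();
                            vm_page_deactivate(m);
                            vm_page_unlock_queues();
                    }
            }
    }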
451 * object must be locked
452 * object == m->object
455 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state)
457 if (object->shadow_severed) {
464 vm_fault_cleanup(object, first_m);
480 vm_fault_cleanup(object, first_m);
497 vm_fault_cleanup(object, first_m);
511 * m->object must be locked
548 assert(m->object != kernel_object);
552 (m->object->purgable == VM_PURGABLE_DENY ||
553 m->object->purgable == VM_PURGABLE_NONVOLATILE ||
554 m->object->purgable == VM_PURGABLE_VOLATILE )) {
563 if (m->object->size > VM_ZF_OBJECT_SIZE_THRESHOLD) {
576 * specified by the given virtual memory object
599 * The source object must be locked and referenced,
604 * If the call succeeds, the object in which "result_page"
606 * If this is not the original object, a busy page in the
607 * original object is returned in "top_page", to prevent other
609 * reference for the original object. The "top_page" should
619 vm_object_offset_t first_offset, /* Offset into object */
626 vm_page_t *top_page, /* Page in top object, if
643 vm_object_t object;
664 * the object currently reside on backing store. This existence map
666 * created at the time of the first pageout against the object, i.e.
667 * at the same time pager for the object is created. The optimization
671 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
672 * either marked as paged out in the existence map for the object or no
673 * existence map exists for the object. MUST_ASK_PAGER() is one of the
677 * permanent objects. Note also that if the pager for an internal object
679 * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object
682 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
683 * is marked as paged out in the existence map for the object. PAGED_OUT()
685 * into a copy object in order to avoid a redundant page out operation.
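Lines 664-685 describe the existence-map optimization behind MUST_ASK_PAGER() and PAGED_OUT(). In this era of xnu both are thin macros over vm_external_state_get(); the reconstruction below is hedged (the exact spelling may differ from the file), with the fallback definitions matching the conservative behavior the comment describes for objects that have no existence map:

    #if MACH_PAGEMAP
    /* ask the pager unless the existence map says this page was never paged out */
    #define MUST_ASK_PAGER(o, f) \
            (vm_external_state_get((o)->existence_map, (f)) != VM_EXTERNAL_STATE_ABSENT)
    /* the page is known to be present on backing store */
    #define PAGED_OUT(o, f) \
            (vm_external_state_get((o)->existence_map, (f)) == VM_EXTERNAL_STATE_EXISTS)
    #else
    /* no existence map: always ask the pager, never assume paged out */
    #define MUST_ASK_PAGER(o, f)    TRUE
    #define PAGED_OUT(o, f)         FALSE
    #endif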
748 * 1) At all times, we must either have the object
749 * lock or a busy page in some object to prevent
759 * object before we do, we must keep a busy page in
760 * the top object while following the shadow chain.
762 * 3) We must increment paging_in_progress on any object
764 * the object lock
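Lines 748-764 state the invariants vm_fault_page() keeps while walking the shadow chain: always hold either an object lock or a busy page, keep a busy page in the top object during the walk, and take a paging reference on any object whose lock may be dropped. The descent step visible at lines 1005-1013 then looks roughly like this (the vm_object_lock(next_object) call is inferred; it cannot appear in the listing because it names next_object rather than object):

    /* one step down the shadow chain (sketch, not a verbatim excerpt) */
    next_object = object->shadow;
    if (next_object != VM_OBJECT_NULL) {
            /* the same data lives at offset + shadow_offset in the backing object */
            offset += object->shadow_offset;
            fault_info->lo_offset += object->shadow_offset;
            fault_info->hi_offset += object->shadow_offset;

            vm_object_lock(next_object);    /* lock the child before releasing the parent */
            vm_object_unlock(object);
            object = next_object;
            vm_object_paging_begin(object); /* invariant 3: pin the object we now hold */
    }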
771 object = first_object;
779 (integer_t)object, offset, fault_type, *protection, 0);
790 if (!object->alive) {
792 * object is no longer valid
795 vm_fault_cleanup(object, first_m);
804 m = vm_page_lookup(object, offset);
806 dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
823 wait_result = PAGE_SLEEP(object, m, interruptible);
826 (integer_t)object, offset,
831 vm_fault_cleanup(object, first_m);
865 vm_fault_cleanup(object, first_m);
882 vm_fault_cleanup(object, first_m);
899 vm_fault_cleanup(object, first_m);
910 * in the top object) and move on down to the
911 * next object (if there is one).
914 dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
916 next_object = object->shadow;
922 * busy in the first object, and free
934 error = vm_fault_check(object, m, first_m, interruptible_state);
941 (integer_t)object, offset,
945 if (object != first_object) {
952 * drop reference and lock on current object
954 vm_object_paging_end(object);
955 vm_object_unlock(object);
965 object = first_object;
968 vm_object_lock(object);
986 vm_object_paging_end(object);
987 else if (object != first_object) {
988 vm_object_paging_end(object);
1001 (integer_t)object, offset,
1003 offset+object->shadow_offset,0);
1005 offset += object->shadow_offset;
1006 fault_info->lo_offset += object->shadow_offset;
1007 fault_info->hi_offset += object->shadow_offset;
1011 vm_object_unlock(object);
1012 object = next_object;
1013 vm_object_paging_begin(object);
1024 && ((object != first_object) || (object->copy != VM_OBJECT_NULL))
1041 (integer_t)object, offset,
1044 * take an extra ref so that object won't die
1046 vm_object_reference_locked(object);
1048 vm_fault_cleanup(object, first_m);
1051 vm_object_lock(object);
1052 assert(object->ref_count > 0);
1054 m = vm_page_lookup(object, offset);
1059 vm_object_unlock(object);
1061 vm_object_deallocate(object);
1065 vm_object_unlock(object);
1067 vm_object_deallocate(object);
1101 assert(object == m->object);
1114 if (m->object->code_signed) {
1118 * memory object but we don't need to
1122 * gets copied to another object as a result
1131 * remove the page from the queue, but not the object
1138 (integer_t)object, offset, (integer_t)m, 0, 0);
1148 * we get here when there is no page present in the object at
1151 * this object can provide the data or we're the top object...
1152 * object is locked; m == NULL
1154 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset) == TRUE) && !data_supply);
1157 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1159 if ((look_for_page || (object == first_object)) && !must_be_resident && !object->phys_contiguous) {
1161 * Allocate a new page for this object/offset pair
1165 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1169 vm_fault_cleanup(object, first_m);
1174 vm_page_insert(m, object, offset);
1183 if (!object->pager_ready) {
1192 (integer_t)object, offset, 0, 0, 0);
1195 * take an extra ref so object won't die
1197 vm_object_reference_locked(object);
1198 vm_fault_cleanup(object, first_m);
1201 vm_object_lock(object);
1202 assert(object->ref_count > 0);
1204 if (!object->pager_ready) {
1205 wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);
1207 vm_object_unlock(object);
1210 vm_object_deallocate(object);
1214 vm_object_unlock(object);
1215 vm_object_deallocate(object);
1221 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1224 * requests pending on this external object, we
1233 * take an extra ref so object won't die
1235 vm_object_reference_locked(object);
1237 vm_fault_cleanup(object, first_m);
1241 vm_object_lock(object);
1242 assert(object->ref_count > 0);
1244 if (object->paging_in_progress > vm_object_pagein_throttle) {
1245 vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS, interruptible);
1247 vm_object_unlock(object);
1249 vm_object_deallocate(object);
1253 vm_object_unlock(object);
1254 vm_object_deallocate(object);
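Lines 1221-1254 are the pagein throttle: if an external (pager-backed) object already has more than vm_object_pagein_throttle requests in flight, the faulting thread takes an extra reference so the object cannot die, tears down its fault state with vm_fault_cleanup(), sleeps on VM_OBJECT_EVENT_PAGING_IN_PROGRESS, and retries. The bounded-in-flight pattern itself, as a small self-contained user-space analogue (names are illustrative, not from xnu):

    #include <pthread.h>

    /* At most "limit" requests may be in flight; extra callers block until
     * one finishes, much as the fault path waits on
     * VM_OBJECT_EVENT_PAGING_IN_PROGRESS and then re-checks. */
    struct throttle {
            pthread_mutex_t lock;
            pthread_cond_t  done;           /* signalled when a request completes */
            int             in_progress;
            int             limit;
    };

    static void throttle_begin(struct throttle *t)
    {
            pthread_mutex_lock(&t->lock);
            while (t->in_progress >= t->limit)
                    pthread_cond_wait(&t->done, &t->lock);  /* wait, then re-check */
            t->in_progress++;
            pthread_mutex_unlock(&t->lock);
    }

    static void throttle_end(struct throttle *t)
    {
            pthread_mutex_lock(&t->lock);
            t->in_progress--;
            pthread_cond_signal(&t->done);
            pthread_mutex_unlock(&t->lock);
    }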
1270 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1275 * holding the object lock. If that has happened, then bail out
1279 pager = object->pager;
1282 vm_fault_cleanup(object, first_m);
1289 * so we can release the object lock.
1292 vm_object_unlock(object);
1295 * If this object uses a copy_call strategy,
1296 * and we are interested in a copy of this object
1303 * the object that it manages.
1305 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object)
1312 (integer_t)object, offset, (integer_t)m,
1320 offset + object->paging_offset,
1326 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1328 vm_object_lock(object);
1332 vm_fault_cleanup(object, first_m);
1341 vm_fault_cleanup(object, first_m);
1346 if (m == VM_PAGE_NULL && object->phys_contiguous) {
1348 * No page here means that the object we
1351 * with Virtual VRAM, the object might not
1353 * so we're done here only if the object is
1355 * Otherwise, if the object is no longer
1357 * page fault against the object's new backing
1358 * store (different memory object).
1370 * Retry with same object/offset, since new data may
1378 * We get here if the object has no pager, or an existence map
1385 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
1387 if (object == first_object)
1394 (integer_t)object, offset, (integer_t)m,
1395 (integer_t)object->shadow, 0);
1397 next_object = object->shadow;
1402 * fill the page in the top object with zeros.
1406 if (object != first_object) {
1407 vm_object_paging_end(object);
1408 vm_object_unlock(object);
1410 object = first_object;
1412 vm_object_lock(object);
1415 assert(m->object == object);
1425 error = vm_fault_check(object, m, first_m, interruptible_state);
1434 vm_fault_cleanup(object, VM_PAGE_NULL);
1439 vm_page_insert(m, object, offset);
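Lines 1397-1439 are the terminal zero-fill case: there is no pager left to ask and no further shadow object, so the fault falls back to the top-level object and satisfies the fault with a zeroed page there. A hedged sketch of the shape of that path; the restoration of first_offset/first_m and the vm_page_zero_fill() call are assumptions based on the surrounding code, since those lines do not mention "object" and so are absent from the listing:

    /* no pager, no shadow: zero-fill in the top-level object (sketch) */
    if (object != first_object) {
            vm_object_paging_end(object);
            vm_object_unlock(object);

            object = first_object;
            offset = first_offset;
            vm_object_lock(object);
    }
    m = first_m;                            /* reuse the busy placeholder page, if any */
    first_m = VM_PAGE_NULL;
    assert(m == VM_PAGE_NULL || m->object == object);

    if (m == VM_PAGE_NULL) {
            m = vm_page_grab();             /* otherwise take a fresh page */
            vm_page_insert(m, object, offset);
    }
    vm_page_zero_fill(m);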
1447 * Move on to the next object. Lock the next
1448 * object before unlocking the current one.
1450 if ((object != first_object) || must_be_resident)
1451 vm_object_paging_end(object);
1453 offset += object->shadow_offset;
1454 fault_info->lo_offset += object->shadow_offset;
1455 fault_info->hi_offset += object->shadow_offset;
1459 vm_object_unlock(object);
1461 object = next_object;
1462 vm_object_paging_begin(object);
1476 * top-level object;
1479 * The current object (object) is locked. A paging
1485 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
1507 (integer_t)object, offset, (integer_t)m,
1512 * already owned by the top-level object,
1514 * by the top-level object.
1516 if ((object != first_object) && (m != VM_PAGE_NULL)) {
1519 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
1539 vm_fault_cleanup(object, first_m);
1552 * the lock on an intermediate object (since we
1553 * have the bottom object locked). We can't
1554 * unlock the bottom object, because the page
1558 * we have no more use for the bottom object,
1573 vm_fault_cleanup(object, first_m);
1580 (integer_t)object, offset,
1603 * We no longer need the old page or object.
1606 vm_object_paging_end(object);
1607 vm_object_unlock(object);
1614 object = first_object;
1617 vm_object_lock(object);
1630 vm_page_insert(copy_m, object, offset);
1636 * way, let's try to collapse the top object.
1640 vm_object_paging_end(object);
1641 vm_object_collapse(object, offset, TRUE);
1642 vm_object_paging_begin(object);
1649 * copy object. The use of asymmetric copy on write for
1652 * shadowed object, and one here to push it into the copy.
1665 * copied to the copy-object, we have to copy it there.
1684 vm_object_unlock(object);
1688 vm_object_lock(object);
1695 * Make another reference to the copy-object,
1708 * Copy object doesn't cover this page -- do nothing.
1713 * Page currently exists in the copy object
1723 * take an extra ref so object won't die
1727 vm_fault_cleanup(object, first_m);
1763 * in the copy-object, and has already been paged out.
1768 * We must copy the page to the copy object.
1787 vm_fault_cleanup(object, first_m);
1808 vm_fault_cleanup(object, first_m);
1814 * Must copy page into copy-object.
1820 * of the copy-object, it must be removed
1851 * dirty is protected by the object lock
1861 vm_object_unlock(object);
1864 * Write the page to the copy-object,
1876 if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
1879 vm_object_lock(object);
1884 * Pick back up the old object's
1887 * object tree.]
1889 vm_object_lock(object);
1893 * in the object tree, we must restart
1926 (integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0);
1936 if (m->object->internal) {
1947 vm_fault_is_sequential(object, offset, fault_info->behavior);
1949 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
1954 vm_object_unlock(object);
1980 * 3. the page belongs to a code-signed object
1986 (page)->object->code_signed /*3*/ && \
1992 * m->object must be locked
1994 * NOTE: m->object could be locked "shared" only if we are called
1996 * careful not to modify the VM object in any way that is not
2015 vm_object_lock_assert_held(m->object);
2025 cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
2049 if (m->object->internal) {
2073 vm_object_lock_assert_exclusive(m->object);
2079 /* VM map is locked, so 1 ref will remain on VM object */
2118 (long long)vaddr, m, m->object, m->offset);
2137 vm_object_lock_assert_exclusive(m->object);
2235 vm_object_t object; /* Top-level object */
2238 vm_object_t old_copy_object; /* Saved copy object */
2301 * Find the backing store object and offset into
2310 &object, &offset, &prot, &wired,
2329 * we must hold the top object lock exclusively
2335 if (vm_object_lock_upgrade(object) == FALSE) {
2340 vm_object_lock(object);
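Lines 2329-2340 (and the similar retries at 2433, 2518, 2577, 2899 and 2971) show the pattern vm_fault() uses whenever its fast path, which runs under a shared object lock, discovers it needs exclusive access: try vm_object_lock_upgrade(), and if that fails the lock was dropped, so take it exclusively and re-validate before continuing. The same discipline in a self-contained user-space form (pthread rwlocks cannot upgrade in place, so only the fallback branch exists, which is exactly the case the kernel code has to handle; the struct and names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct guarded {
            pthread_rwlock_t lock;          /* assumed initialized; held shared on entry */
            unsigned long    version;       /* bumped on every exclusive change */
            int              value;
    };

    /* Returns false if the state changed while we were re-locking and the
     * caller must restart, mirroring the "Retry" paths in vm_fault(). */
    static bool promote_and_update(struct guarded *g, unsigned long seen_version, int new_value)
    {
            pthread_rwlock_unlock(&g->lock);        /* give up the shared hold */
            pthread_rwlock_wrlock(&g->lock);        /* now held exclusive */

            if (g->version != seen_version) {
                    /* somebody got in while the lock was dropped: restart */
                    pthread_rwlock_unlock(&g->lock);
                    return false;
            }
            g->value = new_value;
            g->version++;
            pthread_rwlock_unlock(&g->lock);
            return true;
    }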
2349 vm_fault_classify(object, offset, fault_type);
2353 * possible while holding the map lock and object locks.
2354 * Busy pages are not used until the object lock has to
2357 * point, and object references aren't used.
2369 * - Have to push page into copy object.
2373 * refer to the current object being examined. object and offset
2374 * are the original object from the map. The loop is at the
2375 * top level if and only if object and cur_object are the same.
2378 * original object and cur_object (if different) when
2385 * If this page is to be inserted in a copy delay object
2386 * for writing, and if the object has a copy, then the
2389 if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
2390 object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE))
2393 cur_object = object;
2405 * have object that 'm' belongs to locked exclusively
2407 if (object != cur_object) {
2408 vm_object_unlock(object);
2418 * the top object lock associated with this page
2433 if (vm_object_lock_upgrade(object) == FALSE) {
2437 * will have dropped the object lock and
2441 * at the top level of the object chain
2443 vm_object_lock(object);
2487 * the page's VM object lock.
2490 * have object that 'm' belongs to locked exclusively
2492 if (object != cur_object) {
2493 vm_object_unlock(object);
2503 * the top object lock associated with this page
2518 if (vm_object_lock_upgrade(object) == FALSE) {
2522 * will have dropped the object lock and
2526 * at the top level of the object chain
2528 vm_object_lock(object);
2557 * want to hold the VM object exclusively.
2559 if (object != cur_object) {
2561 vm_object_unlock(object);
2577 if (vm_object_lock_upgrade(object) == FALSE) {
2581 * will have dropped the object lock and
2585 * at the top level of the object chain
2587 vm_object_lock(object);
2595 * - At top level w/o copy object.
2600 if (object == cur_object && object->copy == VM_OBJECT_NULL) {
2604 * might not have taken the object lock
2622 if (object != cur_object) {
2624 * We still need to hold the top object
2629 * object.
2638 * Let's just record what the top object
2641 top_object = object;
2644 * switch to the object that has the new page
2646 object = cur_object;
2652 * object and map are both locked
2654 * object == m->object
2656 * no paging references on either object or cur_object
2684 * It's safe to drop the top object
2697 vm_object_collapse(object, offset, TRUE);
2705 vm_fault_is_sequential(object, cur_offset, fault_info.behavior);
2707 vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior);
2715 vm_object_unlock(object);
2727 * object->copy must not be NULL (else control
2729 * have a potential push into the copy object
2732 if (cur_object == object) {
2747 * object. Give up if allocate fails. Also
2752 * object and cur_object... no need to take
2754 * we don't drop either object lock until
2774 vm_page_insert(m, object, offset);
2778 * Now cope with the source page and object
2780 if (object->ref_count > 1 && cur_m->pmapped)
2788 * The object from which we've just
2795 if (cur_object->copy == object) {
2799 * object into its backing object.
2802 } else if (cur_object->copy == object->shadow &&
2803 object->shadow->resident_page_count == 0) {
2843 * inserted into the original object.
2847 if (object != cur_object)
2849 vm_object_unlock(object);
2865 if (object != cur_object)
2867 vm_object_unlock(object);
2890 if (cur_object != object) {
2893 cur_object = object;
2899 if (vm_object_lock_upgrade(object) == FALSE) {
2902 * since we dropped the object lock which
2913 m = vm_page_alloc(object, offset);
2950 if (cur_object != object)
2959 * Cleanup from fast fault failure. Drop any object
2962 if (object != cur_object)
2966 * must own the object lock exclusively at this point
2971 if (vm_object_lock_upgrade(object) == FALSE) {
2979 vm_object_lock(object);
2989 * Make a reference to this object to
2995 vm_object_reference_locked(object);
2996 vm_object_paging_begin(object);
3002 kr = vm_fault_page(object, offset, fault_type,
3011 * has been dropped and the object unlocked... the ref_count
3015 * is still held along with the ref_count on the original object
3017 * if m != NULL, then the object it belongs to
3021 * object it belongs to has a paging reference
3026 * we didn't succeed, lose the object reference immediately.
3028 vm_object_deallocate(object);
3060 ((top_page == VM_PAGE_NULL) == (m->object == object)));
3081 old_copy_object = m->object->copy;
3082 vm_object_unlock(m->object);
3087 * no object locks are held at this point
3125 vm_object_lock(m->object);
3129 vm_fault_cleanup(m->object, top_page);
3136 vm_object_lock(object);
3138 vm_fault_cleanup(object, top_page);
3140 vm_object_deallocate(object);
3146 if ((retry_object != object) || (retry_offset != offset)) {
3159 vm_object_lock(m->object);
3163 vm_fault_cleanup(m->object, top_page);
3170 vm_object_lock(object);
3172 vm_fault_cleanup(object, top_page);
3174 vm_object_deallocate(object);
3179 * Check whether the protection has changed or the object
3185 vm_object_lock(m->object);
3187 if (m->object->copy != old_copy_object) {
3189 * The copy object changed while the top-level object
3195 vm_object_lock(object);
3210 vm_fault_cleanup(m->object, top_page);
3212 vm_fault_cleanup(object, top_page);
3214 vm_object_deallocate(object);
3251 vm_fault_cleanup(m->object, top_page);
3252 vm_object_deallocate(object);
3263 * in the object
3273 (!pmap_eligible_for_execute((ppnum_t)(object->shadow_offset >> 12)))) {
3280 vm_fault_cleanup(object, top_page);
3281 vm_object_deallocate(object);
3311 vm_map_lock_read(entry->object.sub_map);
3317 real_map = entry->object.sub_map;
3319 map = entry->object.sub_map;
3327 (entry->object.vm_object != NULL) &&
3328 (entry->object.vm_object == object)) {
3336 (((vm_map_offset_t) (entry->object.vm_object->shadow_offset)) +
3339 (VM_WIMG_MASK & (int)object->wimg_bits), 0);
3346 (((vm_map_offset_t)(entry->object.vm_object->shadow_offset)) +
3349 (VM_WIMG_MASK & (int)object->wimg_bits), 0);
3364 vm_fault_cleanup(m->object, top_page);
3366 vm_fault_cleanup(object, top_page);
3368 vm_object_deallocate(object);
3405 if ((entry->object.vm_object != NULL) &&
3407 entry->object.vm_object->phys_contiguous) {
3466 vm_object_t object;
3469 object = (entry->is_sub_map)
3470 ? VM_OBJECT_NULL : entry->object.vm_object;
3478 if (object != VM_OBJECT_NULL && object->phys_contiguous)
3495 if (object == VM_OBJECT_NULL) {
3514 vm_object_lock(object);
3515 vm_object_paging_begin(object);
3520 object,
3533 * different objects. During a forced unmount, the object is terminated
3539 if (result == VM_FAULT_MEMORY_ERROR && !object->alive)
3545 result_object = result_page->object;
3605 vm_object_t object;
3633 vm_object_paging_end(object); \
3634 vm_object_unlock(object); \
3640 vm_object_deallocate(object); \
3659 * Find the backing store object and offset into it.
3662 object = entry->object.vm_object;
3667 * Make a reference to this object to prevent its
3671 vm_object_lock(object);
3672 vm_object_reference_locked(object);
3673 vm_object_paging_begin(object);
3678 * 1) At all times, we must either have the object
3679 * lock or a busy page in some object to prevent
3690 * Look for page in top-level object. If it's not there or
3695 m = vm_page_lookup(object, offset);
3730 * Give up if the page is being written and there's a copy object
3732 if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
3773 vm_object_t object = page->object;
3775 vm_object_lock(object);
3781 vm_fault_cleanup(object, top_page);
3788 vm_object_t object;
3791 object = page->object;
3792 vm_object_lock(object);
3796 vm_object_paging_end(object);
3797 vm_object_unlock(object);
3805 * Copy pages from one virtual memory object to another --
3929 old_copy_object = dst_page->object->copy;
3939 * top page but keep the paging reference on the object
3947 vm_object_unlock(dst_page->object);
3960 * No source object. We will just
4011 (result_page->object == src_object));
4014 vm_object_unlock(result_page->object);
4024 vm_object_lock(dst_page->object);
4026 if (dst_page->object->copy != old_copy_object) {
4027 vm_object_unlock(dst_page->object);
4034 vm_object_unlock(dst_page->object);
4069 vm_object_unlock(dst_page->object);
4083 vm_object_unlock(dst_page->object);
4131 vm_fault_classify(vm_object_t object,
4139 m = vm_page_lookup(object, offset);
4146 ((level == 0) && object->copy == VM_OBJECT_NULL)) {
4154 if (object->pager_created) {
4158 if (object->shadow == VM_OBJECT_NULL) {
4163 offset += object->shadow_offset;
4164 object = object->shadow;
4203 vm_object_t object;
4211 vm_object_lock_assert_exclusive(page->object);
4231 page, page->object, page->offset);
4242 object = page->object;
4243 assert(object->code_signed);
4246 if (!object->alive || object->terminating || object->pager == NULL) {
4248 * The object is terminating and we don't have its pager
4258 assert(!object->internal);
4259 assert(object->pager != NULL);
4260 assert(object->pager_ready);
4262 pager = object->pager;
4271 offset + object->paging_offset,
4285 vm_object_t object;
4293 vm_object_lock_assert_held(page->object);
4300 vm_object_lock_assert_exclusive(page->object);
4315 page, page->object, page->offset);
4324 vm_object_lock_assert_exclusive(page->object);
4326 object = page->object;
4327 assert(object->code_signed);
4332 /* keep page busy while we map (and unlock) the VM object */
4337 * Take a paging reference on the VM object
4341 vm_object_paging_begin(object);
4348 object,
4352 FALSE); /* can't unlock object ! */
4362 assert(object == page->object);
4363 vm_object_lock_assert_exclusive(object);
4370 vm_paging_unmap_object(object, koffset, koffset + ksize);
4375 vm_object_paging_end(object);
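Lines 4285-4375 are vm_page_validate_cs(): the page is kept busy, a paging reference pins the code-signed object, the page is mapped into the kernel with vm_paging_map_object() (which may have to unlock the object, hence the busy bit), the signature is checked via the pager and paging offset, and then the mapping and the paging reference are released. A hedged sketch of that sequence; the exact argument order of vm_paging_map_object() and the PAGE_WAKEUP_DONE() cleanup are reconstructed from the fragments and from convention, not quoted from the file:

    vm_object_lock_assert_exclusive(page->object);

    object = page->object;
    offset = page->offset;
    assert(object->code_signed);

    /* keep page busy while we map (and possibly unlock) the VM object */
    page->busy = TRUE;
    /* take a paging reference so the object survives the mapping */
    vm_object_paging_begin(object);

    kr = vm_paging_map_object(&koffset, page, object, offset,
                              &ksize, FALSE);        /* can't unlock object ! */

    /* ... validate the code signature through the kernel mapping ... */

    vm_paging_unmap_object(object, koffset, koffset + ksize);
    vm_object_paging_end(object);
    PAGE_WAKEUP_DONE(page);                          /* clear busy, wake waiters */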