Lines matching refs:object in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

62  *	Virtual memory object module.
101 * page of memory exists within exactly one object.
103 * An object is only deallocated when all "references"
106 * Associated with each object is a list of all resident
107 * memory pages belonging to that object; this list is
108 * maintained by the "vm_page" module, but locked by the object's
111 * Each object also records the memory object reference
113 * back data (the memory object, field "pager"), etc...
117 * memory object into a virtual address space (vm_map).
120 * memory object are called "permanent", because all changes
126 * with the object can be discarded once it is no longer
129 * A permanent memory object may be mapped into more
132 * object concurrently. Only one thread is allowed to
136 * necessary fields in the virtual memory object structure.
150 * In this case, the virtual memory object (and its
151 * backing storage -- its memory object) only contain
153 * field refers to the virtual memory object that contains
156 * The "copy" field refers to a virtual memory object
158 * this object, in order to implement another form
161 * The virtual memory object structure also records
162 * the attributes associated with its memory object.
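
Taken together, lines 101-162 name the core per-object state: the resident page list, the reference count, the pager, and the shadow/copy links used for copy-on-write. A minimal C model of that layout, with hypothetical names and types (the real definition in osfmk/vm/vm_object.h is far larger):

    #include <stdbool.h>
    #include <stdint.h>

    struct vm_page;                            /* maintained by the vm_page module */

    /* Hypothetical, simplified model of the fields named above; the real
     * definition in osfmk/vm/vm_object.h is far larger. */
    struct vm_object_model {
        struct vm_page          *memq;          /* list of resident pages */
        int                      ref_count;     /* object deallocated at zero */
        void                    *pager;         /* backing memory object */
        struct vm_object_model  *shadow;        /* object we read through (COW) */
        struct vm_object_model  *copy;          /* delayed copy shadowing us */
        uint64_t                 shadow_offset; /* our window into the shadow */
        bool                     temporary;     /* changes need not persist */
        bool                     can_persist;   /* cacheable once unreferenced */
    };
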
173 vm_object_t object);
176 vm_object_t object);
182 vm_object_t object);
191 vm_object_t object,
195 vm_object_t object,
205 * memory object (kernel_object) to avoid wasting data structures.
212 * The submap object is used as a placeholder for vm_map_submap
213 * operations. The object is declared in vm_map.c because it
224 * object structure, be sure to add initialization
235 * When an object from this queue is referenced again,
247 * object cache. It must be held when objects are
250 * memory object based on one of the memory object ports
253 * Ideally, the object cache should be more isolated
278 vm_object_t object; /* corresponding object */
293 static void vm_object_reap(vm_object_t object);
294 static void vm_object_reap_async(vm_object_t object);
333 * pager / cache object association in the hashtable.
355 entry->object = VM_OBJECT_NULL;
371 * Returns a new object with the given size.
377 vm_object_t object)
380 "vm_object_allocate, object 0x%X size 0x%X\n",
381 (integer_t)object, size, 0,0,0);
383 *object = vm_object_template;
384 queue_init(&object->memq);
385 queue_init(&object->msr_q);
387 queue_init(&object->uplq);
389 vm_object_lock_init(object);
390 object->size = size;
397 register vm_object_t object;
399 object = (vm_object_t) zalloc(vm_object_zone);
401 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
403 if (object != VM_OBJECT_NULL)
404 _vm_object_allocate(size, object);
406 return object;
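
The allocation path above (lines 371-406) grabs storage from a zone and then copies a prebuilt template, so a newly added field only needs its default set in one place (see line 224). A hedged sketch of the same pattern, substituting malloc for zalloc and a hypothetical object type:

    #include <stdlib.h>

    struct my_object {
        size_t size;
        int    ref_count;
        /* ... queues, lock, flags ... */
    };

    /* Prebuilt defaults, copied wholesale on every allocation; the values
     * here are illustrative only. */
    static const struct my_object object_template = { .size = 0, .ref_count = 1 };

    struct my_object *object_allocate(size_t size)
    {
        struct my_object *object = malloc(sizeof *object);   /* zalloc in xnu */
        if (object != NULL) {
            *object = object_template;  /* like "*object = vm_object_template" */
            object->size = size;        /* per-instance fixups: queues, lock, size */
        }
        return object;
    }
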
438 "vm object hash entries");
446 * Fill in a template object, for quick initialization
456 * The lock will be initialized for each allocated object in

539 * Initialize the "kernel object"
559 * Initialize the "submap object". Make it as large as the
560 * kernel object so that no limit is imposed on submap sizes.
574 * Create an "extra" reference to this object so that we never
606 * Finish initializing the kernel object.
633 * Release a reference to the specified object,
636 * are gone, storage associated with this object
639 * No object may be locked.
646 register vm_object_t object)
652 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
653 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
655 if (object == VM_OBJECT_NULL)
658 if (object == kernel_object) {
668 if (object->ref_count > 2 ||
669 (!object->named && object->ref_count > 1)) {
675 * The object currently looks like it is not being
679 * object (cache lock + exclusive object lock).
680 * Lock the object "shared" to make sure we don't race with
683 vm_object_lock_shared(object);
684 ref_count_p = (volatile UInt32 *) &object->ref_count;
685 original_ref_count = object->ref_count;
691 (!object->named && original_ref_count > 1)) {
695 (UInt32 *) &object->ref_count);
703 vm_object_unlock(object);
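
Lines 668-703 show the lock-avoiding fast path of vm_object_deallocate: while holding the object lock shared, the reference is dropped with an atomic compare-and-swap, and the exclusive lock is taken only when the count could reach the termination threshold. A sketch of that pattern with C11 atomics standing in for xnu's OSCompareAndSwap:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true if the reference was dropped without the exclusive lock;
     * false means the caller must retry on the slow path. */
    static bool ref_drop_fast(_Atomic unsigned int *ref_count, bool named)
    {
        unsigned int original = atomic_load(ref_count);
        /* Safe only when the drop cannot reach the termination threshold:
         * more than two refs, or more than one on an unnamed object. */
        if (original > 2 || (!named && original > 1)) {
            unsigned int desired = original - 1;
            /* Fails if ref_count changed since we read it, mirroring
             * OSCompareAndSwap(original, desired, &object->ref_count). */
            return atomic_compare_exchange_strong(ref_count, &original, desired);
        }
        return false;
    }
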
719 while (object != VM_OBJECT_NULL) {
723 * the object; we must lock it before removing
724 * the object.
732 * holding a lock on this object while
734 * object
736 if (vm_object_lock_try(object))
743 assert(object->ref_count > 0);
746 * If the object has a named reference, and only
750 if ((object->ref_count == 2) && (object->named)) {
751 memory_object_t pager = object->pager;
754 /* more mappers for this object */
757 vm_object_mapping_wait(object, THREAD_UNINT);
758 vm_object_mapping_begin(object);
759 vm_object_unlock(object);
771 * holding a lock on this object while
773 * object
775 if (vm_object_lock_try(object))
782 assert(object->ref_count > 0);
784 vm_object_mapping_end(object);
796 /* if the object is terminating, it cannot go into */
800 if ((object->ref_count > 1) || object->terminating) {
801 vm_object_lock_assert_exclusive(object);
802 object->ref_count--;
803 vm_object_res_deallocate(object);
806 if (object->ref_count == 1 &&
807 object->shadow != VM_OBJECT_NULL) {
810 * VM object. We can't tell if it's a valid
812 * object is just part of a possibly stale and
816 * back to this parent object.
817 * But we can try and collapse this object with
820 * We can't bypass this object though, since we
824 vm_object_collapse(object, 0, FALSE);
827 vm_object_unlock(object);
829 ((object = vm_object_cache_trim(TRUE)) !=
838 * before destroying or caching the object.
841 if (object->pager_created && ! object->pager_initialized) {
842 assert(! object->can_persist);
843 vm_object_assert_wait(object,
846 vm_object_unlock(object);
853 * If this object can persist, then enter it in
862 if ((object->can_persist) && (object->alive)) {
867 vm_object_lock_assert_exclusive(object);
868 if (--object->ref_count > 0) {
869 vm_object_res_deallocate(object);
870 vm_object_unlock(object);
873 ((object = vm_object_cache_trim(TRUE)) !=
886 shadow = object->shadow;
887 object->shadow = VM_OBJECT_NULL;
892 * Enter the object onto the queue of
896 assert(object->shadow == VM_OBJECT_NULL);
897 VM_OBJ_RES_DECR(object);
900 (integer_t)object,
907 queue_enter(&vm_object_cached_list, object,
910 vm_object_deactivate_all_pages(object);
911 vm_object_unlock(object);
920 object = shadow;
934 object = vm_object_cache_trim(TRUE);
935 if (object == VM_OBJECT_NULL) {
942 * This object is not cachable; terminate it.
946 (integer_t)object, object->resident_page_count,
947 object->paging_in_progress,
948 (void *)current_thread(),object->ref_count);
950 VM_OBJ_RES_DECR(object); /* XXX ? */
952 * Terminate this object. If it had a shadow,
959 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
960 if(vm_object_terminate(object) != KERN_SUCCESS) {
964 object = shadow;
968 ((object = vm_object_cache_trim(TRUE)) !=
980 * down the cache. If so, remove an object from
989 register vm_object_t object = VM_OBJECT_NULL;
1007 * the first object in the cache.
1014 object = (vm_object_t) queue_first(&vm_object_cached_list);
1015 if(object == (vm_object_t) &vm_object_cached_list) {
1025 vm_object_lock(object);
1026 queue_remove(&vm_object_cached_list, object, vm_object_t,
1031 * Since this object is in the cache, we know
1036 assert(object->pager_initialized);
1037 assert(object->ref_count == 0);
1038 vm_object_lock_assert_exclusive(object);
1039 object->ref_count++;
1042 * Terminate the object.
1043 * If the object had a shadow, we let vm_object_deallocate
1049 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1050 if(vm_object_terminate(object) != KERN_SUCCESS)
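
vm_object_cache_trim (lines 980-1050) pops the oldest entry off vm_object_cached_list, revives its reference count, and terminates it. A self-contained sketch of that trim step over an intrusive doubly linked LRU list (hypothetical types; the cache lock, shadow handling, and termination itself are elided):

    #include <stddef.h>

    struct cached_object {
        struct cached_object *next, *prev;  /* intrusive LRU links */
        int ref_count;
    };

    /* head->next is the oldest entry; an empty list points at itself. */
    static struct cached_object *cache_trim_one(struct cached_object *head)
    {
        struct cached_object *object = head->next;
        if (object == head)
            return NULL;                    /* cache empty, nothing to trim */

        object->prev->next = object->next;  /* unlink, like queue_remove() */
        object->next->prev = object->prev;
        object->next = object->prev = NULL;

        object->ref_count++;                /* revive (cf. line 1039) ... */
        return object;                      /* ... so the caller can terminate it */
    }
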
1077 * Upon entry, the object must be locked,
1078 * and the object must have exactly one reference.
1080 * The shadow object reference is left alone.
1082 * The object must be unlocked if its found that pages
1083 * must be flushed to a backing object. If someone
1084 * manages to map the object while it is being flushed
1085 * the object is returned unlocked and unchanged. Otherwise,
1087 * object will cease to exist.
1091 register vm_object_t object)
1134 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
1135 (integer_t)object, object->ref_count, 0, 0, 0);
1143 if (!object->pageout && (!object->temporary || object->can_persist)
1144 && (object->pager != NULL || object->shadow_severed)) {
1148 while (!queue_empty(&object->memq)) {
1163 * out of the object instead of cleaned in place. This
1166 object->pager_trusted = FALSE;
1168 p = (vm_page_t) queue_first(&object->memq);
1179 vm_object_paging_wait(object, THREAD_UNINT);
1183 panic("vm_object_terminate.3 %p %p", object, p);
1210 panic("vm_object_terminate.4 %p %p", object, p);
1216 if ((p->dirty || p->precious) && !p->error && object->alive) {
1223 vm_object_paging_wait(object, THREAD_UNINT);
1225 "vm_object_terminate restart, object 0x%X ref %d\n",
1226 (integer_t)object, object->ref_count, 0, 0, 0);
1250 vm_object_unlock(object);
1252 vm_object_lock(object);
1256 * Make sure the object isn't already being terminated
1258 if(object->terminating) {
1259 vm_object_lock_assert_exclusive(object);
1260 object->ref_count--;
1261 assert(object->ref_count > 0);
1263 vm_object_unlock(object);
1268 * Did somebody get a reference to the object while we were
1271 if(object->ref_count != 1) {
1272 vm_object_lock_assert_exclusive(object);
1273 object->ref_count--;
1274 assert(object->ref_count > 0);
1275 vm_object_res_deallocate(object);
1277 vm_object_unlock(object);
1285 object->terminating = TRUE;
1286 object->alive = FALSE;
1287 vm_object_remove(object);
1290 * Detach the object from its shadow if we are the shadow's
1294 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1295 !(object->pageout)) {
1297 if (shadow_object->copy == object)
1302 if (object->paging_in_progress != 0) {
1305 * on this object, meaning that there are some paging
1306 * or other I/O operations in progress for this VM object.
1308 * up front to ensure that the object doesn't go away, but
1309 * they may also need to acquire a reference on the VM object,
1312 * object, triggering its termination, while still holding
1318 * complete the VM object termination if it still holds
1322 * VM object is "terminating" and not "alive".
1324 vm_object_reap_async(object);
1326 vm_object_unlock(object);
1330 * object's reference on its shadow object yet.
1332 * completed this object's termination.
1337 /* complete the VM object termination */
1338 vm_object_reap(object);
1339 object = VM_OBJECT_NULL;
1340 /* cache lock and object lock were released by vm_object_reap() */
1343 * KERN_SUCCESS means that this object has been terminated
1344 * and no longer needs its shadow object but still holds a
1356 * Complete the termination of a VM object after it's been marked
1359 * The VM object cache and the VM object must be locked by caller.
1360 * The locks will be released on return and the VM object is no longer valid.
1364 vm_object_t object)
1377 vm_object_lock_assert_exclusive(object);
1378 assert(object->paging_in_progress == 0);
1387 pager = object->pager;
1388 object->pager = MEMORY_OBJECT_NULL;
1391 memory_object_control_disable(object->pager_control);
1394 vm_object_lock_assert_exclusive(object);
1395 object->ref_count--;
1397 assert(object->res_count == 0);
1400 assert (object->ref_count == 0);
1403 if (object->objq.next || object->objq.prev) {
1404 purgeable_q_t queue = vm_purgeable_object_remove(object);
1418 * if some faults on this object were aborted.
1420 if (object->pageout) {
1421 assert(object->shadow != VM_OBJECT_NULL);
1423 vm_pageout_object_terminate(object);
1425 } else if ((object->temporary && !object->can_persist) ||
1429 while (!queue_empty(&object->memq)) {
1442 p = (vm_page_t) queue_first(&object->memq);
1460 } else if (!queue_empty(&object->memq)) {
1464 assert(object->paging_in_progress == 0);
1465 assert(object->ref_count == 0);
1473 vm_object_unlock(object);
1475 vm_object_lock(object);
1479 object->terminating = FALSE;
1480 vm_object_paging_begin(object);
1481 vm_object_paging_end(object);
1482 vm_object_unlock(object);
1485 vm_external_destroy(object->existence_map, object->size);
1488 object->shadow = VM_OBJECT_NULL;
1490 vm_object_lock_destroy(object);
1492 * Free the space for the object.
1494 zfree(vm_object_zone, object);
1495 object = VM_OBJECT_NULL;
1500 vm_object_t object)
1505 vm_object_lock_assert_exclusive(object);
1509 /* enqueue the VM object... */
1510 queue_enter(&vm_object_reaper_queue, object,
1519 vm_object_t object, shadow_object;
1525 object,
1528 vm_object_lock(object);
1529 assert(object->terminating);
1530 assert(!object->alive);
1534 * Now that the object is dead, it won't touch any more
1540 while (object->paging_in_progress != 0) {
1542 vm_object_wait(object,
1546 vm_object_lock(object);
1550 object->pageout ? VM_OBJECT_NULL : object->shadow;
1552 vm_object_reap(object);
1553 /* cache is unlocked and object is no longer valid */
1554 object = VM_OBJECT_NULL;
1558 * Drop the reference "object" was holding on
1559 * its shadow object.
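
Lines 1500-1559 outline the asynchronous path: vm_object_reap_async() queues the dead object and a dedicated reaper thread waits out paging_in_progress before reaping. A rough sketch of that producer/consumer hand-off using POSIX threads (illustrative only; xnu uses its own queues, locks, and wait events):

    #include <pthread.h>

    struct dead_object {
        struct dead_object *next;
        int paging_in_progress;
    };

    static struct dead_object *reaper_queue;   /* stand-in for vm_object_reaper_queue */
    static pthread_mutex_t reaper_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  reaper_wake = PTHREAD_COND_INITIALIZER;

    /* Like vm_object_reap_async(): enqueue and poke the reaper thread. */
    static void reap_async(struct dead_object *object)
    {
        pthread_mutex_lock(&reaper_lock);
        object->next = reaper_queue;
        reaper_queue = object;
        pthread_cond_signal(&reaper_wake);
        pthread_mutex_unlock(&reaper_lock);
    }

    /* Reaper thread body: dequeue, then (in the real code) sleep until
     * paging_in_progress drains before reaping the object. */
    static void *reaper_thread(void *arg)
    {
        (void)arg;
        for (;;) {
            pthread_mutex_lock(&reaper_lock);
            while (reaper_queue == NULL)
                pthread_cond_wait(&reaper_wake, &reaper_lock);
            struct dead_object *object = reaper_queue;
            reaper_queue = object->next;
            pthread_mutex_unlock(&reaper_lock);
            /* ... wait for object->paging_in_progress == 0, then reap ... */
        }
        return NULL;
    }
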
1637 * Shut down a VM object, despite the
1643 vm_object_t object,
1648 if (object == VM_OBJECT_NULL)
1661 vm_object_lock(object);
1662 object->can_persist = FALSE;
1663 object->named = FALSE;
1664 object->alive = FALSE;
1670 vm_object_remove(object);
1671 old_pager = object->pager;
1672 object->pager = MEMORY_OBJECT_NULL;
1674 memory_object_control_disable(object->pager_control);
1682 vm_object_paging_wait(object, THREAD_UNINT);
1683 vm_object_unlock(object);
1686 * Terminate the object now.
1698 vm_object_deallocate(object);
1712 * Deactivate all pages in the specified object. (Keep its pages
1715 * The object must be locked.
1719 register vm_object_t object)
1733 queue_iterate(&object->memq, p, vm_page_t, listq) {
1765 vm_object_t object,
1775 * entered with object lock held, acquire a paging reference to
1779 orig_object = object;
1786 vm_object_paging_begin(object);
1791 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1802 if ((kill_page) && (object->internal)) {
1807 vm_external_state_clr(object->existence_map, offset);
1815 assert(m->object != kernel_object);
1844 vm_object_paging_end(object);
1846 if (object->shadow) {
1851 offset += object->shadow_offset;
1853 tmp_object = object->shadow;
1856 if (object != orig_object)
1857 vm_object_unlock(object);
1858 object = tmp_object;
1862 if (object != orig_object)
1863 vm_object_unlock(object);
1871 * pages in the specified object range.
1875 * the top-level object; only those pages may
1879 * shadow chain from the top-level object to
1882 * The object must *not* be locked. The object must
1892 register vm_object_t object,
1899 if (object == VM_OBJECT_NULL)
1904 vm_object_lock(object);
1906 if (object->phys_contiguous) {
1908 vm_object_unlock(object);
1913 phys_start = object->shadow_offset + offset;
1916 assert(phys_end <= object->shadow_offset + object->size);
1917 vm_object_unlock(object);
1928 assert(object->internal);
1931 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
1932 vm_object_unlock(object);
1940 if (ptoa_64(object->resident_page_count / 4) < size) {
1947 queue_iterate(&object->memq, p, vm_page_t, listq) {
1957 queue_iterate(&object->memq, p, vm_page_t, listq) {
1976 p = vm_page_lookup(object, target_off);
1988 p = vm_page_lookup(object, target_off);
2003 next_object = object->shadow;
2005 offset += object->shadow_offset;
2007 vm_object_unlock(object);
2008 object = next_object;
2026 vm_object_unlock(object);
2034 * virtual memory object without using
2041 * for the source virtual memory object. The source
2042 * object will be returned *unlocked*.
2052 * A new virtual memory object is returned in a
2054 * new object, starting at a zero offset, are a copy
2082 * Prevent destruction of the source object while we copy.
2089 * Create a new object to hold the copied pages.
2091 * We fill the new object starting at offset 0,
2093 * We don't bother to lock the new object within
2159 * We don't need to hold the object
2164 * Copy the page to the new object.
2172 vm_object_unlock(result_page->object);
2184 vm_object_lock(result_page->object);
2200 vm_fault_cleanup(result_page->object,
2234 * (b) return the null object if
2254 * Lose the extra reference, and return our object.
2266 * memory object, if it can be done without waiting
2275 * The object should be unlocked on entry and exit.
2287 vm_object_t object = *_object;
2292 if (object == VM_OBJECT_NULL) {
2298 vm_object_lock(object);
2300 copy_strategy = object->copy_strategy;
2307 * Make another reference to the object.
2308 * Leave object/offset unchanged.
2311 vm_object_reference_locked(object);
2312 object->shadowed = TRUE;
2313 vm_object_unlock(object);
2327 vm_object_unlock(object);
2331 vm_object_unlock(object);
2345 * Copy the source object (src_object), using the
2349 * The source object must be locked on entry. It
2354 * A new object that represents the copied virtual
2382 * vm object structure? Depends how common this case is.
2402 * Ask the memory manager to give us a memory object
2403 * which represents a copy of the src object.
2404 * The memory manager may give us a memory object
2406 * new memory object. This memory object will arrive
2470 * Copy the specified virtual memory object, using
2496 * to this object, but it has promised not to make any changes on
2500 * Create a new object, called a "copy object" to hold
2503 * Record the original object as the backing object for
2504 * the copy object. If the original mapping does not
2506 * Record the copy object in the original object.
2509 * the copy object.
2510 * Mark the new mapping (the copy object) copy-on-write.
2511 * This makes the copy object itself read-only, allowing
2517 * object is *not* marked copy-on-write. A copied page is pushed
2518 * to the copy object, regardless which party attempted to modify
2522 * original object has not been changed since the last copy, its
2523 * copy object can be reused. Otherwise, a new copy object can be
2524 * inserted between the original object and its previous copy
2525 * object. Since any copy object is read-only, this cannot affect
2526 * the contents of the previous copy object.
2528 * Note that a copy object is higher in the object tree than the
2529 * original object; therefore, use of the copy object recorded in
2530 * the original object must be done carefully, to avoid deadlock.
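
The scheme in lines 2496-2530 amounts to splicing a read-only copy object between the original and the new mapping, with modified pages pushed to the copy on first write. A hedged sketch of just the pointer setup (hypothetical simplified types; all locking and the page push itself are omitted):

    #include <stdlib.h>

    struct cow_object {
        struct cow_object *shadow;  /* backing object we read through */
        struct cow_object *copy;    /* copy object shadowing us, if any */
        int                ref_count;
    };

    /* Insert a fresh copy object between src and the new mapping.  The
     * copy reads through src until a write pushes a private page to it. */
    static struct cow_object *make_delayed_copy(struct cow_object *src)
    {
        struct cow_object *copy = calloc(1, sizeof *copy);
        if (copy == NULL)
            return NULL;
        copy->shadow = src;     /* copy object is backed by the original */
        src->copy = copy;       /* original records its latest copy object */
        src->ref_count++;       /* the copy holds a reference on src */
        /* remaining step: write-protect src's mapped pages so the first
         * modification faults and pushes the old page into `copy` */
        return copy;
    }
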
2586 * Determine whether the old copy object has
2596 * the existing copy-object if
2659 * copy object will be large enough to back either the
2660 * old copy object or the new mapping.
2678 * The copy-object is always made large enough to
2679 * completely shadow the original object, since
2681 * the original object at different points.
2698 * We now have the src object locked, and the new copy object
2727 * Make the old copy-object shadow the new one.
2729 * object.
2753 * Point the new copy at the existing object.
2767 "vm_object_copy_delayed: used copy object %X for source %X\n",
2777 * Perform a copy according to the source object's
2882 * Create a new object which is backed by the
2883 * specified existing object range. The source
2884 * object reference is deallocated.
2886 * The new object and offset into that object
2893 vm_object_t *object, /* IN/OUT */
2900 source = *object;
2905 * due to a combination of vm_remap() that changes a VM object's
2931 * Allocate a new object with the given length
2935 panic("vm_object_shadow: no object for shadowing");
2938 * The new object shadows the source object, adding
2940 * to point to the new object, removing a reference to
2941 * the source object. Net result: no change of reference
2947 * Store the offset into the source object,
2948 * and fix up the offset into the new object.
2958 *object = result;
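
vm_object_shadow (lines 2882-2958) is the write side of asymmetric copy-on-write: the caller's handle is repointed at a new object that shadows the source, consuming the caller's reference so the net reference count is unchanged. A minimal sketch under the same simplification:

    #include <stdlib.h>

    struct shadow_object {
        struct shadow_object *shadow;
        unsigned long         shadow_offset;
        unsigned long         size;
    };

    /* On success, *objectp names the new shadowing object and *offsetp is
     * rebased to zero within it (cf. lines 2947-2958). */
    static int object_shadow(struct shadow_object **objectp,
                             unsigned long *offsetp,
                             unsigned long length)
    {
        struct shadow_object *source = *objectp;
        struct shadow_object *result = calloc(1, sizeof *result);
        if (result == NULL)
            return -1;          /* xnu panics: "no object for shadowing" */

        result->shadow = source;          /* consumes the caller's reference */
        result->shadow_offset = *offsetp; /* window into the source object */
        result->size = length;

        *offsetp = 0;           /* offsets are now relative to the new object */
        *objectp = result;
        return 0;
    }
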
2977 * the memory object control port,
2983 * is asserted. Other mappings using a particular memory object,
2992 * internal object initialization or destruction. [Furthermore,
3004 * the object cannot (or will not) be cached.
3011 * object. [The memory manager may not want to
3012 * destroy the memory object, but may wish to
3017 * the pager field and release the memory object references.
3021 * In addition to the lock on the object, the vm_object_cache_lock
3026 * cannot be used to determine whether a memory object has
3028 * knowledge is important to the shadow object mechanism.]
3045 * Find a VM object corresponding to the given
3046 * pager; if no such object exists, create one,
3057 register vm_object_t object;
3071 * Look for an object associated with this port.
3081 * We must unlock to create a new object;
3092 * to insert; set the object.
3096 entry->object = new_object;
3101 } else if (entry->object == VM_OBJECT_NULL) {
3103 * If a previous object is being terminated,
3116 object = entry->object;
3117 assert(object != VM_OBJECT_NULL);
3120 if (!vm_object_lock_try(object)) {
3129 assert(!internal || object->internal);
3131 assert(!object->named);
3132 object->named = TRUE;
3134 if (object->ref_count == 0) {
3137 (integer_t)object,
3140 queue_remove(&vm_object_cached_list, object,
3144 vm_object_lock_assert_exclusive(object);
3145 object->ref_count++;
3146 vm_object_res_reference(object);
3147 vm_object_unlock(object);
3151 assert(object->ref_count > 0);
3159 (integer_t)pager, (integer_t)object, must_init, 0, 0);
3179 control = memory_object_control_allocate(object);
3182 vm_object_lock(object);
3183 assert(object != kernel_object);
3190 object->pager_created = TRUE;
3191 object->pager = pager;
3192 object->internal = internal;
3193 object->pager_trusted = internal;
3196 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
3198 object->pager_control = control;
3199 object->pager_ready = FALSE;
3201 vm_object_unlock(object);
3208 object->pager_control,
3211 vm_object_lock(object);
3213 object->named = TRUE;
3215 object->pager_ready = TRUE;
3216 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
3219 object->pager_initialized = TRUE;
3220 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
3222 vm_object_lock(object);
3226 * [At this point, the object must be locked]
3231 * thread to map this object.
3234 while (!object->pager_initialized) {
3235 vm_object_sleep(object,
3239 vm_object_unlock(object);
3243 (integer_t)object, (integer_t)object->pager, internal, 0,0);
3244 return(object);
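
vm_object_enter (lines 3045-3244) is a lookup-or-create keyed on the pager: probe the hash, and if the slot is empty, drop the lock, allocate a fresh object, and retry the insert. A one-bucket sketch of that shape (hypothetical; the cached-object revival and pager-initialization handshake shown above are elided):

    #include <stdlib.h>

    struct pager;                   /* opaque backing-store handle */
    struct vmo { struct pager *pager; int ref_count; };

    /* One-bucket stand-in for the pager->object hash table. */
    static struct vmo *bucket;

    static struct vmo *object_enter(struct pager *pager)
    {
        struct vmo *new_object = NULL;

        for (;;) {
            if (bucket != NULL) {
                if (bucket->pager != pager)
                    return NULL;    /* one bucket only; the real code hashes */
                free(new_object);   /* lost the race: discard our spare */
                bucket->ref_count++;
                return bucket;
            }
            if (new_object == NULL) {
                /* xnu drops the cache lock to allocate, then retries the
                 * lookup, exactly as this loop does */
                new_object = calloc(1, sizeof *new_object);
                if (new_object == NULL)
                    return NULL;
                new_object->pager = pager;
                new_object->ref_count = 1;
                continue;
            }
            bucket = new_object;    /* slot still empty: install ours */
            return bucket;
        }
    }
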
3250 * Create a memory object for an internal object.
3252 * The object is locked on entry and exit;
3256 * vm_object_pager_create on an object at
3263 register vm_object_t object)
3272 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
3273 (integer_t)object, 0,0,0,0);
3275 assert(object != kernel_object);
3284 vm_object_paging_begin(object);
3285 if (object->pager_created) {
3290 while (!object->pager_initialized) {
3291 vm_object_sleep(object,
3295 vm_object_paging_end(object);
3300 * Indicate that a memory object has been assigned
3304 object->pager_created = TRUE;
3305 object->paging_offset = 0;
3308 size = object->size;
3310 vm_object_unlock(object);
3314 vm_object_lock(object);
3315 assert(object->size == size);
3316 object->existence_map = map;
3317 vm_object_unlock(object);
3321 * Create the [internal] pager, and associate it with this object.
3324 * can look up the object to complete initializing it. No
3325 * user will ever map this object.
3333 assert(object->temporary);
3335 /* create our new memory object */
3336 (void) memory_object_create(dmm, object->size, &pager);
3346 entry->object = object;
3355 if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object)
3363 vm_object_lock(object);
3368 vm_object_paging_end(object);
3374 * Eliminate the pager/object association
3377 * The object cache must be locked.
3381 vm_object_t object)
3385 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
3390 entry->object = VM_OBJECT_NULL;
3417 * Collapse an object with the object backing it.
3418 * Pages in the backing object are moved into the
3419 * parent, and the backing object is deallocated.
3427 vm_object_t object,
3434 backing_offset = object->shadow_offset;
3435 size = object->size;
3467 * object collapse, so we can just move an encrypted
3468 * page from one object to the other in this case.
3470 * the object lock.
3475 pp = vm_page_lookup(object, new_offset);
3480 * Move the backing object's page up.
3483 vm_page_rename(p, object, new_offset, TRUE);
3500 vm_page_rename(p, object, new_offset, TRUE);
3506 * Parent object has a real page.
3507 * Throw away the backing object's
3516 assert((!object->pager_created && (object->pager == MEMORY_OBJECT_NULL))
3520 assert(!object->pager_created && object->pager == MEMORY_OBJECT_NULL);
3527 * Move the pager from backing_object to object.
3534 assert(!object->paging_in_progress);
3535 object->pager = backing_object->pager;
3536 entry = vm_object_hash_lookup(object->pager, FALSE);
3538 entry->object = object;
3539 object->pager_created = backing_object->pager_created;
3540 object->pager_control = backing_object->pager_control;
3541 object->pager_ready = backing_object->pager_ready;
3542 object->pager_initialized = backing_object->pager_initialized;
3543 object->paging_offset =
3545 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
3546 memory_object_control_collapse(object->pager_control,
3547 object);
3556 * the backing object if there is one. If the shadow offset is
3561 * old map, giving the collapsed object no map. This means that
3566 assert(object->existence_map == VM_EXTERNAL_NULL);
3574 object->existence_map = backing_object->existence_map;
3582 * moves from within backing_object to within object.
3585 assert(!object->phys_contiguous);
3587 object->shadow = backing_object->shadow;
3588 if (object->shadow) {
3589 object->shadow_offset += backing_object->shadow_offset;
3592 object->shadow_offset = 0;
3594 assert((object->shadow == VM_OBJECT_NULL) ||
3595 (object->shadow->copy != backing_object));
3600 * Since the backing object has no pages, no
3601 * pager left, and no object references within it,
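
vm_object_do_collapse (lines 3417-3601) migrates the backing object's pages and pager into the parent and then splices the backing object out of the shadow chain. A sketch of the final relinking step alone (simplified types; the page moves and hash fixups shown above are elided):

    struct cobj {
        struct cobj  *shadow;
        unsigned long shadow_offset;
        void         *pager;
    };

    /* After the resident pages and the pager association have been moved
     * into the parent, splice the backing object out of the shadow chain
     * (cf. lines 3535 and 3587-3595). */
    static void collapse_relink(struct cobj *object, struct cobj *backing)
    {
        object->pager = backing->pager;    /* parent inherits the pager */
        backing->pager = NULL;

        object->shadow = backing->shadow;  /* skip over the backing object */
        if (object->shadow != NULL)
            object->shadow_offset += backing->shadow_offset; /* compose offsets */
        else
            object->shadow_offset = 0;
        /* backing object now has no pages, no pager, and no references:
         * it can be freed */
    }
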
3624 vm_object_t object,
3628 * Make the parent shadow the next object
3636 * Do object reference in-line to
3638 * residence count. If object is not
3646 if (object->res_count != 0)
3654 assert(!object->phys_contiguous);
3656 object->shadow = backing_object->shadow;
3657 if (object->shadow) {
3658 object->shadow_offset += backing_object->shadow_offset;
3661 object->shadow_offset = 0;
3665 * Backing object might have had a copy pointer
3668 if (backing_object->copy == object) {
3680 * The res_count on the backing object is
3683 * a "swapped" object, which has a 0 res_count,
3684 * in which case, the backing object res_count
3691 * backing object could be bypassed but not
3692 * collapsed, such as when the backing object
3701 if (object->res_count != 0)
3710 * the backing object.
3714 if (object->res_count == 0) {
3719 vm_object_unlock(object);
3724 * Relock object. We don't have to reverify
3730 vm_object_lock(object);
3740 * Perform an object collapse or an object bypass if appropriate.
3744 * Requires that the object be locked and the page queues be unlocked.
3754 register vm_object_t object,
3771 (integer_t)object, 0,0,0,0);
3773 if (object == VM_OBJECT_NULL)
3776 original_object = object;
3786 * There is a backing object, and
3789 backing_object = object->shadow;
3791 if (object != original_object) {
3792 vm_object_unlock(object);
3798 * No pages in the object are currently
3801 if (object->paging_in_progress != 0) {
3804 if (object != original_object) {
3805 vm_object_unlock(object);
3807 object = backing_object;
3815 * The backing object is not read_only,
3816 * and no pages in the backing object are
3818 * The backing object is internal.
3825 if (object != original_object) {
3826 vm_object_unlock(object);
3828 object = backing_object;
3833 * The backing object can't be a copy-object:
3834 * the shadow_offset for the copy-object must stay
3837 * just shadow the next object in the chain, old
3838 * pages from that object would then have to be copied
3840 * parent object.
3845 if (object != original_object) {
3846 vm_object_unlock(object);
3848 object = backing_object;
3854 * object (if the parent is the only reference to
3859 * object, we may be able to collapse it into the
3873 (!object->pager_created
3881 (integer_t)backing_object, (integer_t)object,
3891 if (object != original_object) {
3892 vm_object_unlock(object);
3899 * Collapse the object with its backing
3900 * object, and try again with the object's
3901 * new backing object.
3904 vm_object_do_collapse(object, backing_object);
3910 * Collapsing the backing object was not possible
3916 if (object != original_object) {
3917 vm_object_unlock(object);
3919 object = backing_object;
3925 * If the object doesn't have all its pages present,
3926 * we have to make sure no pages in the backing object
3929 size = atop(object->size);
3930 rcount = object->resident_page_count;
3938 * If the backing object has a pager but no pagemap,
3948 if (object != original_object) {
3949 vm_object_unlock(object);
3951 object = backing_object;
3956 * If the object has a pager but no pagemap,
3960 if (object->pager_created
3962 && (object->existence_map == VM_EXTERNAL_NULL)
3966 if (object != original_object) {
3967 vm_object_unlock(object);
3969 object = backing_object;
3974 * If all of the pages in the backing object are
3975 * shadowed by the parent object, the parent
3976 * object no longer has to shadow the backing
3977 * object; it can shadow the next one in the
3980 * If the backing object has existence info,
3986 backing_offset = object->shadow_offset;
4003 if (object->cow_hint != ~(vm_offset_t)0)
4004 hint_offset = (vm_object_offset_t)object->cow_hint;
4011 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
4013 object->cow_hint = (vm_offset_t)hint_offset;
4015 if (object != original_object) {
4016 vm_object_unlock(object);
4018 object = backing_object;
4023 * If the object's window onto the backing_object
4025 * pages in the backing object, it makes sense to
4031 * find the same pages in the backing object's
4058 if (offset < object->size &&
4060 !EXISTS_IN_OBJECT(object, offset, rc)) {
4062 object->cow_hint = (vm_offset_t)offset;
4070 if (object != original_object) {
4071 vm_object_unlock(object);
4073 object = backing_object;
4080 * backing object that show through to the object.
4091 (offset + PAGE_SIZE_64 < object->size) ?
4103 !EXISTS_IN_OBJECT(object, offset, rcount)) {
4105 object->cow_hint = (vm_offset_t)offset;
4111 if (object != original_object) {
4112 vm_object_unlock(object);
4114 object = backing_object;
4121 object->cow_hint = (vm_offset_t)0;
4124 * All interesting pages in the backing object
4126 * Thus we can bypass the backing object.
4129 vm_object_do_bypass(object, backing_object);
4133 * Try again with this object's new backing object.
4139 if (object != original_object) {
4140 vm_object_unlock(object);
4148 * object range from the object's list of pages.
4151 * The object must be locked.
4152 * The object must not have paging_in_progress, usually
4160 register vm_object_t object,
4172 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
4176 p = vm_page_lookup(object, start);
4187 p = (vm_page_t) queue_first(&object->memq);
4188 while (!queue_end(&object->memq, (queue_entry_t) p)) {
4205 * regions of memory into a single object.
4209 * NOTE: Only works at the moment if the second object is NULL -
4210 * if it's not, which object do we lock first?
4213 * prev_object First object to coalesce
4215 * next_object Second object to coalesce
4222 * The object(s) must *not* be locked. The map must be locked
4223 * to preserve the reference to the object(s).
4257 * Try to collapse the object first
4266 * . shadows another object
4286 * Remove any pages that may still be in the object from
4294 * Extend the object if necessary.
4300 * We cannot extend an object that has existence info,
4302 * the entire object.
4304 * This assertion must be true because the object
4318 * Attach a set of physical pages to an object, so that they can
4319 * be mapped by mapping the object. Typically used to map IO memory.
4326 vm_object_t object,
4348 vm_object_lock(object);
4349 if ((old_page = vm_page_lookup(object, offset))
4362 vm_page_insert(m, object, offset);
4365 vm_object_unlock(object);
4378 vm_object_t object);
4402 register vm_object_t object)
4407 if (object == o) {
4446 vm_object_t object)
4452 if (object == VM_OBJECT_NULL) {
4459 iprintf("object 0x%x", object);
4460 printf(", shadow=0x%x", object->shadow);
4461 printf(", copy=0x%x", object->copy);
4462 printf(", pager=0x%x", object->pager);
4463 printf(", ref=%d\n", object->ref_count);
4466 object = object->shadow;
4478 vm_object_t object;
4484 object = (vm_object_t) (long) db_addr;
4485 if (object == VM_OBJECT_NULL)
4488 iprintf("object 0x%x\n", object);
4492 iprintf("size=0x%x", object->size);
4493 printf(", memq_hint=%p", object->memq_hint);
4494 printf(", ref_count=%d\n", object->ref_count);
4497 printf("res_count=%d, ", object->res_count);
4499 printf("resident_page_count=%d\n", object->resident_page_count);
4501 iprintf("shadow=0x%x", object->shadow);
4502 if (object->shadow) {
4504 vm_object_t shadow = object;
4509 printf(", copy=0x%x", object->copy);
4510 printf(", shadow_offset=0x%x", object->shadow_offset);
4511 printf(", last_alloc=0x%x\n", object->last_alloc);
4513 iprintf("pager=0x%x", object->pager);
4514 printf(", paging_offset=0x%x", object->paging_offset);
4515 printf(", pager_control=0x%x\n", object->pager_control);
4517 iprintf("copy_strategy=%d[", object->copy_strategy);
4518 switch (object->copy_strategy) {
4544 iprintf("all_wanted=0x%x<", object->all_wanted);
4546 if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) {
4550 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) {
4554 if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) {
4558 if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) {
4562 if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) {
4566 if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) {
4570 if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) {
4575 printf(", paging_in_progress=%d\n", object->paging_in_progress);
4578 (object->pager_created ? "" : "!"),
4579 (object->pager_initialized ? "" : "!"),
4580 (object->pager_ready ? "" : "!"),
4581 (object->can_persist ? "" : "!"),
4582 (object->pager_trusted ? "" : "!"),
4583 (object->pageout ? "" : "!"),
4584 (object->internal ? "internal" : "external"),
4585 (object->temporary ? "temporary" : "permanent"));
4587 (object->alive ? "" : "!"),
4588 ((object->purgable != VM_PURGABLE_DENY) ? "" : "!"),
4589 ((object->purgable == VM_PURGABLE_VOLATILE) ? "" : "!"),
4590 ((object->purgable == VM_PURGABLE_EMPTY) ? "" : "!"),
4591 (object->shadowed ? "" : "!"),
4592 (vm_object_cached(object) ? "" : "!"),
4593 (object->private ? "" : "!"));
4595 (object->advisory_pageout ? "" : "!"),
4596 (object->silent_overwrite ? "" : "!"));
4600 vm_external_print(object->existence_map, object->size);
4603 iprintf("paging_object=0x%x\n", object->paging_object);
4608 p = (vm_page_t) queue_first(&object->memq);
4609 while (!queue_end(&object->memq, (queue_entry_t) p)) {
4638 boolean_t vm_object_find(vm_object_t object);
4643 vm_object_t object)
4669 obj = entry->object.vm_object;
4674 if (obj == object) {
4694 vm_object_t object,
4703 if(!object->private)
4708 vm_object_lock(object);
4709 if(!object->phys_contiguous) {
4712 vm_object_unlock(object);
4715 base_offset += object->paging_offset;
4717 m = vm_page_lookup(object, base_offset);
4768 vm_page_insert(m, object, base_offset);
4783 object->shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
4784 object->size = size;
4786 vm_object_unlock(object);
4811 register vm_object_t object = VM_OBJECT_NULL;
4822 queue_iterate(&vm_object_cached_list, object,
4824 if (object->pager &&
4825 (pager_ops == object->pager->mo_pager_ops)) {
4826 vm_object_lock(object);
4827 queue_remove(&vm_object_cached_list, object,
4832 * Since this object is in the cache, we know
4838 assert(object->pager_initialized);
4839 assert(object->ref_count == 0);
4840 vm_object_lock_assert_exclusive(object);
4841 object->ref_count++;
4844 * Terminate the object.
4845 * If the object had a shadow, we let
4852 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
4853 if ((vm_object_terminate(object) == KERN_SUCCESS)
4876 vm_object_t object;
4886 (entry->object != VM_OBJECT_NULL)) {
4887 if (entry->object->named == TRUE)
4891 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE))
4896 /* wait for object (if any) to be ready */
4897 if (object != VM_OBJECT_NULL) {
4898 vm_object_lock(object);
4899 object->named = TRUE;
4900 while (!object->pager_ready) {
4901 vm_object_sleep(object,
4905 *control = object->pager_control;
4906 vm_object_unlock(object);
4915 * Attempt to recover a named reference for a VM object.
4916 * VM will verify that the object has not already started
4920 * KERN_SUCCESS - we recovered a named reference on the object
4921 * KERN_FAILURE - we could not recover a reference (object dead)
4922 * KERN_INVALID_ARGUMENT - bad memory object control
4929 vm_object_t object;
4932 object = memory_object_control_to_vm_object(control);
4933 if (object == VM_OBJECT_NULL) {
4939 vm_object_lock(object);
4941 if (object->terminating && wait_on_terminating) {
4943 vm_object_wait(object,
4950 if (!object->alive) {
4952 vm_object_unlock(object);
4956 if (object->named == TRUE) {
4958 vm_object_unlock(object);
4962 if((object->ref_count == 0) && (!object->terminating)){
4963 queue_remove(&vm_object_cached_list, object,
4968 (integer_t)object,
4975 object->named = TRUE;
4976 vm_object_lock_assert_exclusive(object);
4977 object->ref_count++;
4978 vm_object_res_reference(object);
4979 while (!object->pager_ready) {
4980 vm_object_sleep(object,
4984 vm_object_unlock(object);
5008 vm_object_t object,
5014 while (object != VM_OBJECT_NULL) {
5018 * the object. We must lock it before removing
5019 * the object.
5024 vm_object_lock(object);
5025 assert(object->alive);
5027 assert(object->named);
5028 assert(object->ref_count > 0);
5032 * destroying or caching the object.
5035 if (object->pager_created && !object->pager_initialized) {
5036 assert(!object->can_persist);
5037 vm_object_assert_wait(object,
5040 vm_object_unlock(object);
5046 if (((object->ref_count > 1)
5048 || (object->terminating)) {
5049 vm_object_unlock(object);
5054 vm_object_unlock(object);
5061 (object->ref_count == 1)) {
5063 object->named = FALSE;
5064 vm_object_unlock(object);
5068 vm_object_deallocate(object);
5071 VM_OBJ_RES_DECR(object);
5072 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
5073 if(object->ref_count == 1) {
5074 if(vm_object_terminate(object) != KERN_SUCCESS) {
5083 object = shadow;
5088 vm_object_lock_assert_exclusive(object);
5089 object->ref_count--;
5090 assert(object->ref_count > 0);
5092 object->named = FALSE;
5093 vm_object_unlock(object);
5106 vm_object_t object,
5119 (integer_t)object, offset, size,
5125 if (object == VM_OBJECT_NULL)
5134 * Lock the object, and acquire a paging reference to
5137 vm_object_lock(object);
5138 vm_object_paging_begin(object);
5140 (void)vm_object_update(object,
5143 vm_object_paging_end(object);
5144 vm_object_unlock(object);
5153 * Empty a purgeable object by grabbing the physical pages assigned to it and
5158 * than happy to grab these since this is a purgeable object. We mark the
5159 * object as "empty" after reaping its pages.
5161 * On entry the object and page queues are locked, the object must be a
5162 * purgeable object with no delayed copies pending.
5165 vm_object_purge(vm_object_t object)
5178 if (object->purgable == VM_PURGABLE_DENY)
5181 assert(object->purgable != VM_PURGABLE_NONVOLATILE);
5182 object->purgable = VM_PURGABLE_EMPTY;
5184 assert(object->copy == VM_OBJECT_NULL);
5185 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5192 * Go through the object's resident pages and try and discard them.
5194 next = (vm_page_t)queue_first(&object->memq);
5195 while (!queue_end(&object->memq, (queue_entry_t)next)) {
5203 * Keep holding the object's lock to guarantee that
5204 * the object's page list doesn't change under us
5253 assert(p->object != kernel_object);
5279 * a page from an "empty" object, so do it explicitly here.
5309 * state of a purgeable object. A purgeable object is created via a call to
5310 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
5311 * never be coalesced with any other object -- even other purgeable objects --
5312 * and will thus always remain a distinct object. A purgeable object has
5314 * count is greater than 1, then a purgeable object will behave like a normal
5315 * object and attempts to use this interface will result in an error return
5318 * A purgeable object may be put into a "volatile" state which will make the
5319 * object's pages eligible for being reclaimed without paging to backing
5321 * purgeable object are reclaimed, the purgeable object is said to have been
5322 * "emptied." When a purgeable object is emptied the system will reclaim as
5323 * many pages from the object as it can in a convenient manner (pages already
5325 * a purgeable object is made volatile, its pages will generally be reclaimed
5327 * generally used by applications which can recreate the data in the object
5331 * A purgeable object may be designated as "non-volatile" which means it will
5334 * object was emptied before the object was made non-volatile, that fact will
5335 * be returned as the old state of the purgeable object (see
5336 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
5337 * were reclaimed as part of emptying the object will be refaulted in as
5338 * zero-fill on demand. It is up to the application to note that an object
5340 * purgeable object is made non-volatile, its pages will generally not be paged
5341 * out to backing store in the immediate future. A purgeable object may also
5345 * volatile purgeable object may be queried at any time. This information may
5349 * The specified address may be any address within the purgeable object. If
5350 * the specified address does not represent any object in the target task's
5352 * object containing the specified address is not a purgeable object, then
5358 * state is used to set the new state of the purgeable object and return its
5360 * object is returned in the parameter state.
5365 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
5366 * immediately reclaim as many pages in the object as can be conveniently
5370 * The process of making a purgeable object non-volatile and determining its
5371 * previous state is atomic. Thus, if a purgeable object is made
5373 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
5374 * completely intact and will remain so until the object is made volatile
5375 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
5380 * The object must be locked.
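
From user space, the states described in lines 5309-5380 are driven through vm_purgable_control(). A short usage sketch, assuming the Mach APIs behave as this comment block documents (error handling abbreviated):

    #include <mach/mach.h>
    #include <mach/vm_purgable.h>
    #include <stdio.h>

    int main(void)
    {
        vm_address_t addr = 0;
        vm_size_t    size = 16 * 4096;

        /* A purgeable object must be created with VM_FLAGS_PURGABLE. */
        if (vm_allocate(mach_task_self(), &addr, size,
                        VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE) != KERN_SUCCESS)
            return 1;

        /* Make it volatile: pages become reclaimable without paging out. */
        int state = VM_PURGABLE_VOLATILE;
        vm_purgable_control(mach_task_self(), addr,
                            VM_PURGABLE_SET_STATE, &state);

        /* Later, take it back; the old state is returned in `state`. */
        state = VM_PURGABLE_NONVOLATILE;
        vm_purgable_control(mach_task_self(), addr,
                            VM_PURGABLE_SET_STATE, &state);
        if (state == VM_PURGABLE_EMPTY)
            printf("contents were reclaimed; refault as zero-fill\n");

        vm_deallocate(mach_task_self(), addr, size);
        return 0;
    }
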
5384 vm_object_t object,
5391 if (object == VM_OBJECT_NULL) {
5399 * Get current state of the purgeable object.
5401 old_state = object->purgable;
5406 assert(object->copy == VM_OBJECT_NULL);
5407 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5421 object->purgable = new_state;
5427 object->resident_page_count);
5428 vm_page_purgeable_count -= object->resident_page_count;
5430 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5431 purgeable_q_t queue = vm_purgeable_object_remove(object);
5444 object->resident_page_count == 0)
5468 vm_page_purgeable_count += object->resident_page_count;
5472 object->purgable = new_state;
5474 /* object should not be on a queue */
5475 assert(object->objq.next == NULL && object->objq.prev == NULL);
5482 * object. If a new token is added, the most important object's priority is boosted.
5484 * It doesn't seem more biasing is necessary in this case, where no new object is added.
5486 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5488 purgeable_q_t old_queue=vm_purgeable_object_remove(object);
5503 vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
5515 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5516 purgeable_q_t old_queue=vm_purgeable_object_remove(object);
5525 vm_page_purgeable_count += object->resident_page_count;
5527 object->purgable = VM_PURGABLE_VOLATILE;
5528 (void) vm_object_purge(object);
5544 * Called from vm_object_deallocate and when swapping out an object.
5546 * The object is locked, and remains locked throughout the function,
5548 * will be dropped, but not the original object.
5555 vm_object_t object)
5557 vm_object_t orig_object = object;
5560 * from vm_object_deallocate. Original object is never
5563 assert(object->res_count > 0);
5564 while (--object->res_count == 0) {
5565 assert(object->ref_count >= object->res_count);
5566 vm_object_deactivate_all_pages(object);
5568 if (object->shadow != VM_OBJECT_NULL) {
5569 vm_object_t tmp_object = object->shadow;
5571 if (object != orig_object)
5572 vm_object_unlock(object);
5573 object = tmp_object;
5574 assert(object->res_count > 0);
5578 if (object != orig_object)
5579 vm_object_unlock(object);
5585 * Internal function to increment residence count on a vm object
5587 * when swapping in a vm object, via vm_map_swap.
5589 * The object is locked, and remains locked throughout the function,
5591 * will be dropped, but not the original object.
5598 vm_object_t object)
5600 vm_object_t orig_object = object;
5605 while ((++object->res_count == 1) &&
5606 (object->shadow != VM_OBJECT_NULL)) {
5607 vm_object_t tmp_object = object->shadow;
5609 assert(object->ref_count >= object->res_count);
5611 if (object != orig_object)
5612 vm_object_unlock(object);
5613 object = tmp_object;
5615 if (object != orig_object)
5616 vm_object_unlock(object);
5624 * Gets another reference to the given object.
5631 register vm_object_t object)
5633 if (object == VM_OBJECT_NULL)
5636 vm_object_lock(object);
5637 assert(object->ref_count > 0);
5638 vm_object_reference_locked(object);
5639 vm_object_unlock(object);
5648 * have memory objects associated with them. Having this cache too
5715 * Since we're about to mess with the object's backing store,
5717 * to prevent any paging activity on this object, so the caller should
5728 * Same as above for the 2nd object...
5744 * Allocate a temporary VM object to hold object1's contents
5790 * an intermediate object.
5801 * an intermediate object.
5845 /* "Lock" refers to the object not its contents */
5846 /* "ref_count" refers to the object not its contents */
5848 /* "res_count" refers to the object not its contents */
5871 /* "paging_in_progress" refers to the object not its contents */
5874 /* "all_wanted" refers to the object not its contents */
5890 /* "shadowed" refers to the object not its contents */
5898 /* "shadow_severed" refers to the object not its contents */
5906 /* "msr_q" is linked to the object not its contents */
5924 /* "uplq" refers to the object not its contents (see upl_transpose()) */
5939 * Re-initialize the temporary object to avoid
5993 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6034 vm_object_lock(object);
6036 if (object->internal)
6037 object_size = object->size;
6038 else if (object->pager != MEMORY_OBJECT_NULL)
6039 vnode_pager_get_object_size(object->pager, &object_size);
6041 goto out; /* pager is gone for this object, nothing more to do */
6053 if (object->pages_used > object->pages_created) {
6058 object->pages_used = object->pages_created = 0;
6060 if ((sequential_run = object->sequential)) {
6074 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
6087 if (object->pages_created < 32 * PRE_HEAT_MULTIPLIER) {
6094 pages_unused = object->pages_created - object->pages_used;
6096 if (pages_unused < (object->pages_created / 8)) {
6098 } else if (pages_unused < (object->pages_created / 4)) {
6100 } else if (pages_unused < (object->pages_created / 2)) {
6170 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
6178 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6200 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
6208 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6220 vm_object_unlock(object);
6226 * the UPL regimen but takes place on the VM object rather than on a UPL
6231 vm_object_t object,
6239 vm_object_lock(object);
6242 if(object->phys_contiguous) {
6245 (object->shadow_offset >> PAGE_SHIFT);
6247 vm_object_unlock(object);
6250 vm_object_unlock(object);
6254 if(object->phys_contiguous) {
6255 vm_object_unlock(object);
6260 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
6261 vm_object_unlock(object);
6271 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6375 vm_object_unlock(object);
6392 vm_object_t object,
6401 if (object->resident_page_count == 0) {
6410 vm_object_lock(object);
6412 if (object->phys_contiguous) {
6413 vm_object_unlock(object);
6420 dst_page = vm_page_lookup(object, offset);
6428 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6432 * it might even belong to a different object
6451 vm_object_unlock(object);
6467 vm_object_lock(vm_object_t object)
6469 if (object == vm_pageout_scan_wants_object) {
6473 lck_rw_lock_exclusive(&object->Lock);
6477 vm_object_lock_try(vm_object_t object)
6479 if (object == vm_pageout_scan_wants_object) {
6483 return (lck_rw_try_lock_exclusive(&object->Lock));
6487 vm_object_lock_shared(vm_object_t object)
6489 if (object == vm_pageout_scan_wants_object) {
6493 lck_rw_lock_shared(&object->Lock);
6497 vm_object_lock_try_shared(vm_object_t object)
6499 if (object == vm_pageout_scan_wants_object) {
6503 return (lck_rw_try_lock_shared(&object->Lock));
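
The closing fragments (lines 6467-6503) all share one shape: each lock wrapper first checks whether the object is the one the pageout scan is waiting for, then takes the underlying reader/writer lock. A reconstruction of that shape with pthread rwlocks (a sketch only; the elided branch bodies and xnu's lck_rw_* primitives differ):

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>

    struct lobj { pthread_rwlock_t Lock; };

    /* Stand-in for vm_pageout_scan_wants_object. */
    static struct lobj *pageout_scan_wants_object;

    static void object_lock(struct lobj *object)
    {
        if (object == pageout_scan_wants_object)
            sched_yield();                     /* elided branch: back off briefly */
        pthread_rwlock_wrlock(&object->Lock);  /* exclusive */
    }

    static bool object_lock_try(struct lobj *object)
    {
        if (object == pageout_scan_wants_object)
            sched_yield();
        return pthread_rwlock_trywrlock(&object->Lock) == 0;
    }

    static void object_lock_shared(struct lobj *object)
    {
        if (object == pageout_scan_wants_object)
            sched_yield();
        pthread_rwlock_rdlock(&object->Lock);  /* shared */
    }
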