Lines Matching refs:object in /macosx-10.10.1/xnu-2782.1.97/osfmk/vm/

62  *	Virtual memory object module.
151 * page of memory exists within exactly one object.
153 * An object is only deallocated when all "references"
156 * Associated with each object is a list of all resident
157 * memory pages belonging to that object; this list is
158 * maintained by the "vm_page" module, but locked by the object's
161 * Each object also records the memory object reference
163 * back data (the memory object, field "pager"), etc...
167 * memory object into a virtual address space (vm_map).
170 * memory object are called "permanent", because all changes
176 * with the object can be discarded once it is no longer
179 * A permanent memory object may be mapped into more
182 * object concurrently. Only one thread is allowed to
186 * necessary fields in the virtual memory object structure.
200 * In this case, the virtual memory object (and its
201 * backing storage -- its memory object) only contain
203 * field refers to the virtual memory object that contains
206 * The "copy" field refers to a virtual memory object
208 * this object, in order to implement another form
211 * The virtual memory object structure also records
212 * the attributes associated with its memory object.
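The fragments above describe the core of the VM object abstraction. As a rough orientation only, the sketch below collects the fields this listing keeps referring to into one hypothetical, heavily simplified structure; the authoritative definition is struct vm_object in osfmk/vm/vm_object.h and carries far more state.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in types so the sketch compiles outside the kernel. */
typedef struct queue_head { struct queue_head *next, *prev; } queue_head_t;
typedef uint64_t vm_object_offset_t;
typedef uint64_t vm_object_size_t;
typedef void    *memory_object_t;

/* Simplified illustration, not the real struct vm_object. */
struct vm_object_sketch {
	queue_head_t        memq;                /* resident pages; locked by the object lock */
	vm_object_size_t    vo_size;             /* size of the object */
	int                 ref_count;           /* object is reaped when this drops to 0 */
	int                 resident_page_count; /* pages currently resident */
	int                 wired_page_count;    /* pages wired in physical memory */
	memory_object_t     pager;               /* memory object supplying backing store */
	vm_object_offset_t  paging_offset;       /* this object's offset within the pager */
	struct vm_object_sketch *shadow;         /* backing object (asymmetric copy-on-write) */
	vm_object_offset_t  vo_shadow_offset;    /* offset of this object within its shadow */
	struct vm_object_sketch *copy;           /* copy object (symmetric copy-on-write) */
	unsigned int        purgable;            /* VM_PURGABLE_* state */
	bool                internal;            /* anonymous memory vs. externally paged */
	bool                alive;               /* cleared when termination begins */
	bool                terminating;         /* termination in progress */
};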
223 vm_object_t object);
226 vm_object_t object);
235 vm_object_t object,
239 vm_object_t object,
250 * memory object (kernel_object) to avoid wasting data structures.
259 * The submap object is used as a placeholder for vm_map_submap
260 * operations. The object is declared in vm_map.c because it
271 * object structure, be sure to add initialization
287 * When an object from this queue is referenced again,
299 * object cache. It must be held when objects are
302 * memory object based on one of the memory object ports
305 * Ideally, the object cache should be more isolated
313 vm_object_t object);
360 vm_object_t object; /* corresponding object */
378 static void vm_object_reap(vm_object_t object);
379 static void vm_object_reap_async(vm_object_t object);
481 * pager / cache object association in the hashtable.
487 vm_object_t object)
491 vm_object_lock_assert_exclusive(object);
497 entry->object = object;
498 object->hashed = TRUE;
509 entry->object = VM_OBJECT_NULL;
525 * Returns a new object with the given size.
531 vm_object_t object)
534 "vm_object_allocate, object 0x%X size 0x%X\n",
535 object, size, 0,0,0);
537 *object = vm_object_template;
538 queue_init(&object->memq);
539 queue_init(&object->msr_q);
541 queue_init(&object->uplq);
543 vm_object_lock_init(object);
544 object->vo_size = size;
553 object,
565 register vm_object_t object;
567 object = (vm_object_t) zalloc(vm_object_zone);
569 // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */
571 if (object != VM_OBJECT_NULL)
572 _vm_object_allocate(size, object);
574 return object;
628 "vm object hash entries");
637 * Fill in a template object, for quick initialization
647 * The lock will be initialized for each allocated object in
760 * Initialize the "kernel object"
784 * Initialize the "submap object". Make it as large as the
785 * kernel object so that no limit is imposed on submap sizes.
799 * Create an "extra" reference to this object so that we never
855 * Finish initializing the kernel object.
886 * Release a reference to the specified object,
889 * are gone, storage associated with this object
892 * No object may be locked.
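vm_object_deallocate() is the release half of the reference-counting contract stated in the comments above. The kernel-internal sketch below illustrates the expected pairing; it assumes the xnu-internal entry points that appear elsewhere in this listing (vm_object_allocate, vm_object_reference, vm_object_deallocate) and only builds inside osfmk, so treat it as an illustration of the contract rather than portable code.

#include <vm/vm_object.h>	/* xnu-internal header */

static void
example_object_lifetime(vm_object_size_t size)
{
	vm_object_t object;

	/* A freshly allocated object comes back holding one reference. */
	object = vm_object_allocate(size);
	if (object == VM_OBJECT_NULL)
		return;

	/* Take an extra reference before handing the object elsewhere. */
	vm_object_reference(object);

	/* ... use the object; no object lock may be held at the calls below ... */

	vm_object_deallocate(object);	/* drop the extra reference */
	vm_object_deallocate(object);	/* drop the last one; storage may now be reclaimed */
}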
899 register vm_object_t object)
907 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
908 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
910 if (object == VM_OBJECT_NULL)
913 if (object == kernel_object || object == compressor_object) {
914 vm_object_lock_shared(object);
916 OSAddAtomic(-1, &object->ref_count);
918 if (object->ref_count == 0) {
919 if (object == kernel_object)
924 vm_object_unlock(object);
928 if (object->ref_count == 2 &&
929 object->named) {
931 * This "named" object's reference count is about to
935 } else if (object->ref_count == 2 &&
936 object->internal &&
937 object->shadow != VM_OBJECT_NULL) {
939 * This internal object's reference count is about to
940 * drop from 2 to 1 and it has a shadow object:
941 * we'll want to try and collapse this object with its
944 } else if (object->ref_count >= 2) {
950 * The object currently looks like it is not being
954 * object (cache lock + exclusive object lock).
955 * Lock the object "shared" to make sure we don't race with
958 vm_object_lock_shared(object);
959 ref_count_p = (volatile UInt32 *) &object->ref_count;
960 original_ref_count = object->ref_count;
966 object->named) {
970 object->internal &&
971 object->shadow != VM_OBJECT_NULL) {
982 (UInt32 *) &object->ref_count);
989 vm_object_unlock(object);
1007 while (object != VM_OBJECT_NULL) {
1009 vm_object_lock(object);
1011 assert(object->ref_count > 0);
1014 * If the object has a named reference, and only
1018 if ((object->ref_count == 2) && (object->named)) {
1019 memory_object_t pager = object->pager;
1022 /* more mappers for this object */
1025 vm_object_mapping_wait(object, THREAD_UNINT);
1026 vm_object_mapping_begin(object);
1027 vm_object_unlock(object);
1031 vm_object_lock(object);
1032 vm_object_mapping_end(object);
1034 assert(object->ref_count > 0);
1045 /* if the object is terminating, it cannot go into */
1049 if ((object->ref_count > 1) || object->terminating) {
1050 vm_object_lock_assert_exclusive(object);
1051 object->ref_count--;
1052 vm_object_res_deallocate(object);
1054 if (object->ref_count == 1 &&
1055 object->shadow != VM_OBJECT_NULL) {
1058 * VM object. We can't tell if it's a valid
1060 * object is just part of a possibly stale and
1064 * back to this parent object.
1065 * But we can try and collapse this object with
1068 * We can't bypass this object though, since we
1072 vm_object_collapse(object, 0, FALSE);
1074 vm_object_unlock(object);
1077 ((object = vm_object_cache_trim(TRUE)) !=
1087 * before destroying or caching the object.
1090 if (object->pager_created && ! object->pager_initialized) {
1091 assert(! object->can_persist);
1092 vm_object_assert_wait(object,
1095 vm_object_unlock(object);
1103 * If this object can persist, then enter it in
1112 if ((object->can_persist) && (object->alive)) {
1118 vm_object_lock_assert_exclusive(object);
1119 if (--object->ref_count > 0) {
1120 vm_object_res_deallocate(object);
1121 vm_object_unlock(object);
1124 ((object = vm_object_cache_trim(TRUE)) !=
1137 shadow = object->shadow;
1138 object->shadow = VM_OBJECT_NULL;
1143 * Enter the object onto the queue of
1147 assert(object->shadow == VM_OBJECT_NULL);
1148 VM_OBJ_RES_DECR(object);
1151 object,
1156 vm_object_unlock(object);
1165 * holding a lock on this object while
1167 * object
1169 if (vm_object_lock_try(object))
1179 queue_enter(&vm_object_cached_list, object,
1183 vm_object_deactivate_all_pages(object);
1184 vm_object_unlock(object);
1193 object = shadow;
1207 object = vm_object_cache_trim(TRUE);
1208 if (object == VM_OBJECT_NULL) {
1216 * This object is not cachable; terminate it.
1220 object, object->resident_page_count,
1221 object->paging_in_progress,
1222 (void *)current_thread(),object->ref_count);
1224 VM_OBJ_RES_DECR(object); /* XXX ? */
1226 * Terminate this object. If it had a shadow,
1233 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1235 if (vm_object_terminate(object) != KERN_SUCCESS) {
1239 object = shadow;
1244 ((object = vm_object_cache_trim(TRUE)) !=
1261 vm_object_t object)
1267 vm_object_lock_assert_exclusive(object);
1269 next_p = (vm_page_t)queue_first(&object->memq);
1270 p_limit = MIN(50, object->resident_page_count);
1272 while (!queue_end(&object->memq, (queue_entry_t)next_p) && --p_limit > 0) {
1320 queue_remove(&object->memq, p, vm_page_t, listq);
1321 queue_enter(&object->memq, p, vm_page_t, listq);
1353 vm_object_t object)
1355 queue_remove(&vm_object_cached_list, object, vm_object_t, objq);
1356 object->objq.next = NULL;
1357 object->objq.prev = NULL;
1364 vm_object_t object)
1368 if (object->objq.next || object->objq.prev)
1369 vm_object_cache_remove_locked(object);
1376 vm_object_t object)
1381 if (object->resident_page_count == 0)
1387 if (object->objq.next == NULL && object->objq.prev == NULL) {
1388 queue_enter(&vm_object_cached_list, object, vm_object_t, objq);
1389 object->vo_cache_ts = sec + EVICT_AGE;
1390 object->vo_cache_pages_to_scan = object->resident_page_count;
1403 vm_object_t object = VM_OBJECT_NULL;
1431 * the object on the head of the queue has not
1440 * and lock an object on the cached list
1451 object = next_obj;
1454 if (sec < object->vo_cache_ts) {
1455 KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1457 vm_object_cache_aging_ts = object->vo_cache_ts;
1458 object = VM_OBJECT_NULL;
1461 if (!vm_object_lock_try_scan(object)) {
1464 * an object to steal pages from, we'll revisit in a bit...
1467 KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1469 object = VM_OBJECT_NULL;
1472 if (queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1475 * so deal with it... if we don't remove the object from
1478 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1480 vm_object_cache_remove_locked(object);
1481 vm_object_unlock(object);
1482 object = VM_OBJECT_NULL;
1486 * we have a locked object with pages...
1493 if (object == VM_OBJECT_NULL)
1497 * object is locked at this point and
1500 next_p = (vm_page_t)queue_first(&object->memq);
1514 while (!queue_end(&object->memq, (queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1519 object->vo_cache_pages_to_scan--;
1522 queue_remove(&object->memq, p, vm_page_t, listq);
1523 queue_enter(&object->memq, p, vm_page_t, listq);
1529 queue_remove(&object->memq, p, vm_page_t, listq);
1530 queue_enter(&object->memq, p, vm_page_t, listq);
1536 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1577 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1583 if (object->vo_cache_pages_to_scan == 0) {
1584 KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1586 vm_object_cache_remove(object);
1588 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1591 * done with this object
1593 vm_object_unlock(object);
1594 object = VM_OBJECT_NULL;
1626 * down the cache. If so, remove an object from
1635 register vm_object_t object = VM_OBJECT_NULL;
1655 * the first object in the cache.
1662 object = (vm_object_t) queue_first(&vm_object_cached_list);
1663 if(object == (vm_object_t) &vm_object_cached_list) {
1673 vm_object_lock(object);
1674 queue_remove(&vm_object_cached_list, object, vm_object_t,
1680 * Since this object is in the cache, we know
1685 assert(object->pager_initialized);
1686 assert(object->ref_count == 0);
1687 vm_object_lock_assert_exclusive(object);
1688 object->ref_count++;
1691 * Terminate the object.
1692 * If the object had a shadow, we let vm_object_deallocate
1698 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
1700 if(vm_object_terminate(object) != KERN_SUCCESS)
1720 * Upon entry, the object must be locked,
1721 * and the object must have exactly one reference.
1723 * The shadow object reference is left alone.
1725 * The object must be unlocked if its found that pages
1726 * must be flushed to a backing object. If someone
1727 * manages to map the object while it is being flushed
1728 * the object is returned unlocked and unchanged. Otherwise,
1730 * object will cease to exist.
1734 vm_object_t object)
1738 XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n",
1739 object, object->ref_count, 0, 0, 0);
1741 if (!object->pageout && (!object->temporary || object->can_persist) &&
1742 (object->pager != NULL || object->shadow_severed)) {
1745 * out of the object instead of cleaned in place. This
1748 object->pager_trusted = FALSE;
1750 vm_object_reap_pages(object, REAP_TERMINATE);
1753 * Make sure the object isn't already being terminated
1755 if (object->terminating) {
1756 vm_object_lock_assert_exclusive(object);
1757 object->ref_count--;
1758 assert(object->ref_count > 0);
1759 vm_object_unlock(object);
1764 * Did somebody get a reference to the object while we were
1767 if (object->ref_count != 1) {
1768 vm_object_lock_assert_exclusive(object);
1769 object->ref_count--;
1770 assert(object->ref_count > 0);
1771 vm_object_res_deallocate(object);
1772 vm_object_unlock(object);
1780 object->terminating = TRUE;
1781 object->alive = FALSE;
1783 if ( !object->internal && (object->objq.next || object->objq.prev))
1784 vm_object_cache_remove(object);
1786 if (object->hashed) {
1789 lck = vm_object_hash_lock_spin(object->pager);
1790 vm_object_remove(object);
1794 * Detach the object from its shadow if we are the shadow's
1798 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1799 !(object->pageout)) {
1801 if (shadow_object->copy == object)
1806 if (object->paging_in_progress != 0 ||
1807 object->activity_in_progress != 0) {
1810 * on this object, meaning that there are some paging
1811 * or other I/O operations in progress for this VM object.
1813 * up front to ensure that the object doesn't go away, but
1814 * they may also need to acquire a reference on the VM object,
1817 * object, triggering its termination, while still holding
1823 * complete the VM object termination if it still holds
1827 * VM object is "terminating" and not "alive".
1829 vm_object_reap_async(object);
1830 vm_object_unlock(object);
1834 * object's reference on its shadow object yet.
1836 * completed this object's termination.
1841 * complete the VM object termination
1843 vm_object_reap(object);
1844 object = VM_OBJECT_NULL;
1847 * the object lock was released by vm_object_reap()
1849 * KERN_SUCCESS means that this object has been terminated
1850 * and no longer needs its shadow object but still holds a
1863 * Complete the termination of a VM object after it's been marked
1866 * The VM object must be locked by caller.
1867 * The lock will be released on return and the VM object is no longer valid.
1871 vm_object_t object)
1875 vm_object_lock_assert_exclusive(object);
1876 assert(object->paging_in_progress == 0);
1877 assert(object->activity_in_progress == 0);
1882 * Disown this purgeable object to cleanup its owner's purgeable
1883 * ledgers. We need to do this before disconnecting the object
1886 if (object->internal &&
1887 object->purgable != VM_PURGABLE_DENY) {
1888 vm_purgeable_accounting(object,
1889 object->purgable,
1893 pager = object->pager;
1894 object->pager = MEMORY_OBJECT_NULL;
1897 memory_object_control_disable(object->pager_control);
1899 object->ref_count--;
1901 assert(object->res_count == 0);
1904 assert (object->ref_count == 0);
1909 if (object->internal) {
1912 owner = object->vo_purgeable_owner;
1914 if (object->purgable == VM_PURGABLE_DENY) {
1916 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1919 assert(object->vo_purgeable_owner == NULL);
1921 queue = vm_purgeable_object_remove(object);
1924 if (object->purgeable_when_ripe) {
1938 * object as VM_PURGABLE_EMPTY to avoid updating
1943 assert(object->resident_page_count >=
1944 object->wired_page_count);
1945 delta = (object->resident_page_count -
1946 object->wired_page_count);
1952 if (object->wired_page_count != 0) {
1954 object->wired_page_count);
1955 OSAddAtomic(-object->wired_page_count,
1958 object->purgable = VM_PURGABLE_EMPTY;
1960 else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1961 object->purgable == VM_PURGABLE_EMPTY) {
1963 assert(object->vo_purgeable_owner == TASK_NULL);
1964 vm_purgeable_nonvolatile_dequeue(object);
1966 panic("object %p in unexpected purgeable state 0x%x\n",
1967 object, object->purgable);
1969 assert(object->objq.next == NULL);
1970 assert(object->objq.prev == NULL);
1976 * if some faults on this object were aborted.
1978 if (object->pageout) {
1979 assert(object->shadow != VM_OBJECT_NULL);
1981 vm_pageout_object_terminate(object);
1983 } else if (((object->temporary && !object->can_persist) || (pager == MEMORY_OBJECT_NULL))) {
1985 vm_object_reap_pages(object, REAP_REAP);
1987 assert(queue_empty(&object->memq));
1988 assert(object->paging_in_progress == 0);
1989 assert(object->activity_in_progress == 0);
1990 assert(object->ref_count == 0);
1998 vm_object_unlock(object);
1999 vm_object_release_pager(pager, object->hashed);
2000 vm_object_lock(object);
2004 object->terminating = FALSE;
2005 vm_object_paging_begin(object);
2006 vm_object_paging_end(object);
2007 vm_object_unlock(object);
2010 vm_external_destroy(object->existence_map, object->vo_size);
2013 object->shadow = VM_OBJECT_NULL;
2018 object);
2022 vm_object_lock_destroy(object);
2024 * Free the space for the object.
2026 zfree(vm_object_zone, object);
2027 object = VM_OBJECT_NULL;
2059 vm_object_t object,
2087 if (queue_empty(&object->memq))
2096 next = (vm_page_t)queue_first(&object->memq);
2098 while (!queue_end(&object->memq, (queue_entry_t)next)) {
2138 PAGE_SLEEP(object, p, THREAD_UNINT);
2156 * it on the object to honor the 'wire' contract
2195 assert(p->object != kernel_object);
2228 if ((p->dirty || p->precious) && !p->error && object->alive) {
2245 vm_object_paging_wait(object, THREAD_UNINT);
2278 vm_object_t object)
2280 vm_object_lock_assert_exclusive(object);
2286 /* enqueue the VM object... */
2287 queue_enter(&vm_object_reaper_queue, object,
2300 vm_object_t object, shadow_object;
2306 object,
2311 vm_object_lock(object);
2313 assert(object->terminating);
2314 assert(!object->alive);
2318 * Now that the object is dead, it won't touch any more
2324 while (object->paging_in_progress != 0 ||
2325 object->activity_in_progress != 0) {
2326 vm_object_wait(object,
2329 vm_object_lock(object);
2333 object->pageout ? VM_OBJECT_NULL : object->shadow;
2335 vm_object_reap(object);
2336 /* cache is unlocked and object is no longer valid */
2337 object = VM_OBJECT_NULL;
2341 * Drop the reference "object" was holding on
2342 * its shadow object.
2426 * Shut down a VM object, despite the
2432 vm_object_t object,
2437 if (object == VM_OBJECT_NULL)
2449 vm_object_lock(object);
2450 object->can_persist = FALSE;
2451 object->named = FALSE;
2452 object->alive = FALSE;
2454 if (object->hashed) {
2459 lck = vm_object_hash_lock_spin(object->pager);
2460 vm_object_remove(object);
2463 old_pager = object->pager;
2464 object->pager = MEMORY_OBJECT_NULL;
2466 memory_object_control_disable(object->pager_control);
2473 vm_object_paging_wait(object, THREAD_UNINT);
2474 vm_object_unlock(object);
2477 * Terminate the object now.
2480 vm_object_release_pager(old_pager, object->hashed);
2489 vm_object_deallocate(object);
2506 * Deactivate all pages in the specified object. (Keep its pages
2509 * The object must be locked.
2513 register vm_object_t object)
2527 queue_iterate(&object->memq, p, vm_page_t, listq) {
2563 * want to deactivate the ones at the topmost level in the object chain. In order to do
2617 * while processing a higher level object in the shadow chain.
2639 vm_object_t object,
2651 if (object->existence_map) {
2652 if (vm_external_state_get(object->existence_map, offset)
2662 if (object->internal &&
2663 object->alive &&
2664 !object->terminating &&
2665 object->pager_ready) {
2668 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2678 * so the object can't disappear when we release the lock.
2681 assert(object->paging_in_progress);
2682 pager = object->pager;
2683 vm_object_unlock(object);
2687 offset + object->paging_offset,
2692 vm_object_lock(object);
2724 * Deactivate the pages in the specified object and range. If kill_page is set, also discard any
2731 vm_object_t object,
2762 * If this offset has already been found and handled in a higher level object, then don't
2763 * do anything with it in the current shadow object.
2774 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2794 if ((kill_page) && (object->internal)) {
2818 vm_external_state_clr(object->existence_map, offset);
2820 VM_COMPRESSOR_PAGER_STATE_CLR(object,
2825 assert(!object->all_reusable);
2827 object->reusable_page_count++;
2828 assert(object->resident_page_count >= object->reusable_page_count);
2858 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
2873 if (page_is_paged_out(object, offset)) {
2881 if ((kill_page) && (object->internal)) {
2883 vm_external_state_clr(object->existence_map, offset);
2885 VM_COMPRESSOR_PAGER_STATE_CLR(object,
2899 vm_page_do_delayed_work(object, &dw_array[0], dw_count);
2904 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2922 vm_object_t object;
2937 * a shadow chain on this object. At this point, we haven't done anything with this
2942 object = orig_object;
2945 * Start at the top level object and iterate around the loop once for each object
2950 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2951 vm_object_paging_begin(object);
2953 deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc);
2955 vm_object_paging_end(object);
2958 * We've finished with this object, see if there's a shadow object. If
2959 * there is, update the offset and lock the new object. We also turn off
2960 * kill_page at this point since we only kill pages in the top most object.
2963 tmp_object = object->shadow;
2969 offset += object->vo_shadow_offset;
2973 if (object != orig_object)
2974 vm_object_unlock(object);
2976 object = tmp_object;
2979 if (object && object != orig_object)
2980 vm_object_unlock(object);
2995 vm_object_t object,
3022 object->internal &&
3023 object->vo_size != 0 &&
3024 object->vo_size == size &&
3025 object->reusable_page_count == 0) {
3031 if ((reusable_page || all_reusable) && object->all_reusable) {
3040 length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage);
3048 if (!object->all_reusable) {
3051 object->all_reusable = TRUE;
3052 assert(object->reusable_page_count == 0);
3054 reusable = object->resident_page_count;
3067 vm_object_t object,
3076 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
3080 assert((object)->reusable_page_count <= \
3081 (object)->resident_page_count); \
3082 assert((object)->reusable_page_count > 0); \
3083 (object)->reusable_page_count--; \
3103 vm_object_lock_assert_exclusive(object);
3105 if (object->all_reusable) {
3106 panic("object %p all_reusable: can't update pmap stats\n",
3107 object);
3108 assert(object->reusable_page_count == 0);
3109 object->all_reusable = FALSE;
3110 if (end_offset - start_offset == object->vo_size ||
3113 reused = object->resident_page_count;
3116 queue_iterate(&object->memq, m, vm_page_t, listq) {
3120 object->reusable_page_count++;
3121 assert(object->resident_page_count >= object->reusable_page_count);
3129 } else if (object->resident_page_count >
3135 if (object->reusable_page_count == 0) {
3138 m = vm_page_lookup(object, cur_offset);
3139 VM_OBJECT_REUSE_PAGE(object, m, reused);
3143 queue_iterate(&object->memq, m, vm_page_t, listq) {
3144 if (object->reusable_page_count == 0) {
3151 VM_OBJECT_REUSE_PAGE(object, m, reused);
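The deactivate/reuse machinery listed above (vm_object_deactivate_pages, vm_object_reuse_pages, the reusable_page_count bookkeeping) appears to be what Darwin's madvise() reuse hints ultimately drive; that mapping is an assumption based on the flag names rather than something this listing proves. A minimal user-space sketch:

/* Marks an anonymous buffer reusable, then reclaims it for reuse.
 * MADV_FREE_REUSABLE / MADV_FREE_REUSE are Darwin-specific hints. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * (size_t)getpagesize();
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }

	memset(buf, 0xA5, len);			/* make the pages resident and dirty */

	/* Hint: these pages may be reclaimed without being written to swap. */
	if (madvise(buf, len, MADV_FREE_REUSABLE) != 0)
		perror("madvise(MADV_FREE_REUSABLE)");

	/* Hint: we are about to reuse the range; stop treating it as reusable. */
	if (madvise(buf, len, MADV_FREE_REUSE) != 0)
		perror("madvise(MADV_FREE_REUSE)");

	memset(buf, 0x5A, len);			/* contents may have been discarded meanwhile */
	munmap(buf, len);
	return 0;
}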
3166 * pages in the specified object range.
3170 * the top-level object; only those pages may
3174 * shadow chain from the top-level object to
3177 * The object must *not* be locked. The object must
3187 register vm_object_t object,
3194 vm_object_pmap_protect_options(object, offset, size,
3200 register vm_object_t object,
3211 if (object == VM_OBJECT_NULL)
3216 vm_object_lock(object);
3218 if (object->phys_contiguous) {
3220 vm_object_unlock(object);
3230 phys_start = object->vo_shadow_offset + offset;
3233 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
3234 vm_object_unlock(object);
3255 assert(object->internal);
3258 if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) {
3259 vm_object_unlock(object);
3273 if (ptoa_64(object->resident_page_count / 4) < size) {
3279 queue_iterate(&object->memq, p, vm_page_t, listq) {
3313 p = vm_page_lookup(object, target_off);
3348 next_object = object->shadow;
3350 offset += object->vo_shadow_offset;
3352 vm_object_unlock(object);
3353 object = next_object;
3371 vm_object_unlock(object);
3379 * virtual memory object without using
3386 * for the source virtual memory object. The source
3387 * object will be returned *unlocked*.
3397 * A new virtual memory object is returned in a
3399 * new object, starting at a zero offset, are a copy
3427 * Prevent destruction of the source object while we copy.
3434 * Create a new object to hold the copied pages.
3436 * We fill the new object starting at offset 0,
3438 * We don't bother to lock the new object within
3518 * Copy the page to the new object.
3527 vm_object_unlock(result_page->object);
3538 vm_object_lock(result_page->object);
3554 vm_fault_cleanup(result_page->object,
3587 * (b) return the null object if
3609 * Lose the extra reference, and return our object.
3621 * memory object, if it can be done without waiting
3630 * The object should be unlocked on entry and exit.
3642 vm_object_t object = *_object;
3647 if (object == VM_OBJECT_NULL) {
3653 vm_object_lock(object);
3655 copy_strategy = object->copy_strategy;
3662 * Make another reference to the object.
3663 * Leave object/offset unchanged.
3666 vm_object_reference_locked(object);
3667 object->shadowed = TRUE;
3668 vm_object_unlock(object);
3682 vm_object_unlock(object);
3686 vm_object_unlock(object);
3700 * Copy the source object (src_object), using the
3704 * The source object must be locked on entry. It
3709 * A new object that represents the copied virtual
3737 * vm object structure? Depends how common this case is.
3757 * Ask the memory manager to give us a memory object
3758 * which represents a copy of the src object.
3759 * The memory manager may give us a memory object
3761 * new memory object. This memory object will arrive
3825 * Copy the specified virtual memory object, using
3853 * to this object, but it has promised not to make any changes on
3857 * Create a new object, called a "copy object" to hold
3860 * Record the original object as the backing object for
3861 * the copy object. If the original mapping does not
3863 * Record the copy object in the original object.
3866 * the copy object.
3867 * Mark the new mapping (the copy object) copy-on-write.
3868 * This makes the copy object itself read-only, allowing
3874 * object is *not* marked copy-on-write. A copied page is pushed
3875 * to the copy object, regardless which party attempted to modify
3879 * original object has not been changed since the last copy, its
3880 * copy object can be reused. Otherwise, a new copy object can be
3881 * inserted between the original object and its previous copy
3882 * object. Since any copy object is read-only, this cannot affect
3883 * the contents of the previous copy object.
3885 * Note that a copy object is higher in the object tree than the
3886 * original object; therefore, use of the copy object recorded in
3887 * the original object must be done carefully, to avoid deadlock.
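The copy-object discussion above is the kernel-side view of copy-on-write. One way to drive that machinery from user space is mach_vm_copy(); whether the kernel satisfies a given copy with a delayed copy object or an eager copy depends on the source object's copy strategy, so the sketch below only demonstrates the user-visible semantics: the destination keeps a snapshot, and later writes to the source do not show through.

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	mach_vm_address_t src = 0, dst = 0;
	mach_vm_size_t    len = 4 * vm_page_size;
	kern_return_t     kr;

	if (mach_vm_allocate(mach_task_self(), &src, len, VM_FLAGS_ANYWHERE) != KERN_SUCCESS ||
	    mach_vm_allocate(mach_task_self(), &dst, len, VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
		return 1;

	strcpy((char *)(uintptr_t)src, "original contents");

	/* Copy src over dst; the destination must already be allocated. */
	kr = mach_vm_copy(mach_task_self(), src, len, dst);
	if (kr != KERN_SUCCESS) { printf("mach_vm_copy: %d\n", kr); return 1; }

	strcpy((char *)(uintptr_t)src, "changed after the copy");

	/* The destination still sees the snapshot taken at copy time. */
	printf("dst: %s\n", (char *)(uintptr_t)dst);

	mach_vm_deallocate(mach_task_self(), src, len);
	mach_vm_deallocate(mach_task_self(), dst, len);
	return 0;
}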
3945 * Determine whether the old copy object has
3955 * the existing copy-object if
4027 * copy object will be large enough to back either the
4028 * old copy object or the new mapping.
4046 * The copy-object is always made large enough to
4047 * completely shadow the original object, since
4049 * the original object at different points.
4066 * We now have the src object locked, and the new copy object
4106 * Make the old copy-object shadow the new one.
4108 * object.
4132 * Point the new copy at the existing object.
4146 "vm_object_copy_delayed: used copy object %X for source %X\n",
4156 * Perform a copy according to the source object's
4261 * Create a new object which is backed by the
4262 * specified existing object range. The source
4263 * object reference is deallocated.
4265 * The new object and offset into that object
4272 vm_object_t *object, /* IN/OUT */
4279 source = *object;
4288 * due to a combination of vm_remap() that changes a VM object's
4304 * If the source object is larger than what we are trying
4307 * collapse the underlying object away in the future
4322 * Allocate a new object with the given length
4326 panic("vm_object_shadow: no object for shadowing");
4329 * The new object shadows the source object, adding
4331 * to point to the new object, removing a reference to
4332 * the source object. Net result: no change of reference
4338 * Store the offset into the source object,
4339 * and fix up the offset into the new object.
4349 *object = result;
4368 * the memory object control port,
4374 * is asserted. Other mappings using a particular memory object,
4383 * internal object initialization or destruction. [Furthermore,
4395 * the object cannot (or will not) be cached.
4402 * object. [The memory manager may not want to
4403 * destroy the memory object, but may wish to
4408 * the pager field and release the memory object references.
4412 * In addition to the lock on the object, the vm_object_hash_lock
4417 * cannot be used to determine whether a memory object has
4419 * knowledge is important to the shadow object mechanism.]
4436 * Find a VM object corresponding to the given
4437 * pager; if no such object exists, create one,
4448 register vm_object_t object;
4463 * Look for an object associated with this port.
4473 * We must unlock to create a new object;
4484 * to insert; set the object.
4494 } else if (entry->object == VM_OBJECT_NULL) {
4496 * If a previous object is being terminated,
4510 object = entry->object;
4511 assert(object != VM_OBJECT_NULL);
4514 if ( !vm_object_lock_try(object)) {
4522 assert(!internal || object->internal);
4524 if (object->ref_count == 0) {
4528 vm_object_unlock(object);
4536 object,
4539 queue_remove(&vm_object_cached_list, object,
4547 assert(!object->named);
4548 object->named = TRUE;
4550 vm_object_lock_assert_exclusive(object);
4551 object->ref_count++;
4552 vm_object_res_reference(object);
4555 vm_object_unlock(object);
4561 assert(object->ref_count > 0);
4567 pager, object, must_init, 0, 0);
4587 control = memory_object_control_allocate(object);
4590 vm_object_lock(object);
4591 assert(object != kernel_object);
4598 object->pager_created = TRUE;
4599 object->pager = pager;
4600 object->internal = internal;
4601 object->pager_trusted = internal;
4604 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4606 object->pager_control = control;
4607 object->pager_ready = FALSE;
4609 vm_object_unlock(object);
4616 object->pager_control,
4619 vm_object_lock(object);
4621 object->named = TRUE;
4623 object->pager_ready = TRUE;
4624 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4627 object->pager_initialized = TRUE;
4628 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4630 vm_object_lock(object);
4634 * [At this point, the object must be locked]
4639 * thread to map this object.
4642 while (!object->pager_initialized) {
4643 vm_object_sleep(object,
4647 vm_object_unlock(object);
4651 object, object->pager, internal, 0,0);
4652 return(object);
4658 * Create a memory object for an internal object.
4660 * The object is locked on entry and exit;
4664 * vm_object_pager_create on an object at
4671 register vm_object_t object)
4681 XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
4682 object, 0,0,0,0);
4684 assert(object != kernel_object);
4693 vm_object_paging_begin(object);
4694 if (object->pager_created) {
4699 while (!object->pager_initialized) {
4700 vm_object_sleep(object,
4704 vm_object_paging_end(object);
4709 * Indicate that a memory object has been assigned
4713 object->pager_created = TRUE;
4714 object->paging_offset = 0;
4717 size = object->vo_size;
4719 vm_object_unlock(object);
4724 vm_object_lock(object);
4725 assert(object->vo_size == size);
4726 object->existence_map = map;
4727 vm_object_unlock(object);
4731 if ((uint32_t) object->vo_size != object->vo_size) {
4732 panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
4733 (uint64_t) object->vo_size);
4737 * Create the [internal] pager, and associate it with this object.
4740 * can look up the object to complete initializing it. No
4741 * user will ever map this object.
4749 assert(object->temporary);
4751 /* create our new memory object */
4752 assert((vm_size_t) object->vo_size == object->vo_size);
4753 (void) memory_object_create(dmm, (vm_size_t) object->vo_size,
4761 vm_object_lock(object);
4763 vm_object_hash_insert(entry, object);
4765 vm_object_unlock(object);
4773 if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
4781 vm_object_lock(object);
4786 vm_object_paging_end(object);
4791 register vm_object_t object)
4798 assert(object != kernel_object);
4804 vm_object_paging_begin(object);
4805 if (object->pager_created) {
4810 while (!object->pager_initialized) {
4811 vm_object_sleep(object,
4815 vm_object_paging_end(object);
4820 * Indicate that a memory object has been assigned
4824 object->pager_created = TRUE;
4825 object->paging_offset = 0;
4827 vm_object_unlock(object);
4829 if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
4830 (object->vo_size/PAGE_SIZE)) {
4832 "object size 0x%llx >= 0x%llx\n",
4833 object,
4834 (uint64_t) object->vo_size,
4839 * Create the [internal] pager, and associate it with this object.
4842 * can look up the object to complete initializing it. No
4843 * user will ever map this object.
4846 assert(object->temporary);
4848 /* create our new memory object */
4849 assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
4850 (object->vo_size/PAGE_SIZE));
4852 (memory_object_size_t) object->vo_size,
4856 "no pager for object %p size 0x%llx\n",
4857 object, (uint64_t) object->vo_size);
4863 vm_object_lock(object);
4865 vm_object_hash_insert(entry, object);
4867 vm_object_unlock(object);
4875 pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);
4877 if (pager_object != object) {
4878 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
4886 vm_object_lock(object);
4891 vm_object_paging_end(object);
4897 * Eliminate the pager/object association
4900 * The object cache must be locked.
4904 vm_object_t object)
4908 if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
4913 entry->object = VM_OBJECT_NULL;
4937 void vm_object_do_collapse_compressor(vm_object_t object,
4941 vm_object_t object,
4949 vm_object_lock_assert_exclusive(object);
4952 size = object->vo_size;
4959 for (backing_offset = object->vo_shadow_offset;
4960 backing_offset < object->vo_shadow_offset + object->vo_size;
4977 new_offset = backing_offset - object->vo_shadow_offset;
4979 if (new_offset >= object->vo_size) {
4980 /* we're out of the scope of "object": done */
4984 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4985 (vm_compressor_pager_state_get(object->pager,
4987 object->paging_offset)) ==
4990 * This page already exists in object, resident or
5001 * we need to transfer it to object.
5006 object->pager,
5007 (new_offset + object->paging_offset),
5017 * Collapse an object with the object backing it.
5018 * Pages in the backing object are moved into the
5019 * parent, and the backing object is deallocated.
5027 vm_object_t object,
5034 vm_object_lock_assert_exclusive(object);
5037 assert(object->purgable == VM_PURGABLE_DENY);
5040 backing_offset = object->vo_shadow_offset;
5041 size = object->vo_size;
5073 * object collapse, so we can just move an encrypted
5074 * page from one object to the other in this case.
5076 * the object lock.
5081 pp = vm_page_lookup(object, new_offset);
5084 if (VM_COMPRESSOR_PAGER_STATE_GET(object,
5088 * Parent object has this page
5091 * object's page.
5097 * Move the backing object's page
5100 vm_page_rename(p, object, new_offset,
5120 vm_page_rename(p, object, new_offset, TRUE);
5126 * Parent object has a real page.
5127 * Throw away the backing object's
5136 object->pager != MEMORY_OBJECT_NULL &&
5139 /* move compressed pages from backing_object to object */
5140 vm_object_do_collapse_compressor(object, backing_object);
5146 assert((!object->pager_created &&
5147 (object->pager == MEMORY_OBJECT_NULL)) ||
5151 assert(!object->pager_created &&
5152 object->pager == MEMORY_OBJECT_NULL);
5156 * Move the pager from backing_object to object.
5163 assert(!object->paging_in_progress);
5164 assert(!object->activity_in_progress);
5165 assert(!object->pager_created);
5166 assert(object->pager == NULL);
5167 object->pager = backing_object->pager;
5173 entry = vm_object_hash_lookup(object->pager, FALSE);
5175 entry->object = object;
5178 object->hashed = TRUE;
5180 object->pager_created = backing_object->pager_created;
5181 object->pager_control = backing_object->pager_control;
5182 object->pager_ready = backing_object->pager_ready;
5183 object->pager_initialized = backing_object->pager_initialized;
5184 object->paging_offset =
5186 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
5187 memory_object_control_collapse(object->pager_control,
5188 object);
5201 * the backing object if there is one. If the shadow offset is
5206 * old map, giving the collapsed object no map. This means that
5211 assert(object->existence_map == VM_EXTERNAL_NULL);
5219 object->existence_map = backing_object->existence_map;
5227 * moves from within backing_object to within object.
5230 assert(!object->phys_contiguous);
5232 object->shadow = backing_object->shadow;
5233 if (object->shadow) {
5234 object->vo_shadow_offset += backing_object->vo_shadow_offset;
5235 /* "backing_object" gave its shadow to "object" */
5240 object->vo_shadow_offset = 0;
5242 assert((object->shadow == VM_OBJECT_NULL) ||
5243 (object->shadow->copy != backing_object));
5248 * Since the backing object has no pages, no
5249 * pager left, and no object references within it,
5293 vm_object_t object,
5297 * Make the parent shadow the next object
5301 vm_object_lock_assert_exclusive(object);
5306 * Do object reference in-line to
5308 * residence count. If object is not
5316 if (object->res_count != 0)
5324 assert(!object->phys_contiguous);
5326 object->shadow = backing_object->shadow;
5327 if (object->shadow) {
5328 object->vo_shadow_offset += backing_object->vo_shadow_offset;
5331 object->vo_shadow_offset = 0;
5335 * Backing object might have had a copy pointer
5338 if (backing_object->copy == object) {
5350 * The res_count on the backing object is
5353 * a "swapped" object, which has a 0 res_count,
5354 * in which case, the backing object res_count
5361 * backing object could be bypassed but not
5362 * collapsed, such as when the backing object
5371 if (object->res_count != 0)
5380 * the backing object.
5384 if (object->res_count == 0) {
5392 * valid reference is held on the object... w/o a valid
5394 * regret it) to unlock the object and then retake the lock
5395 * since the object may be terminated and recycled in between.
5396 * The "activity_in_progress" reference will keep the object
5399 vm_object_activity_begin(object);
5400 vm_object_unlock(object);
5406 * Relock object. We don't have to reverify
5412 vm_object_lock(object);
5413 vm_object_activity_end(object);
5423 * Perform an object collapse or an object bypass if appropriate.
5427 * Requires that the object be locked and the page queues be unlocked.
5437 register vm_object_t object,
5456 object, 0,0,0,0);
5458 if (object == VM_OBJECT_NULL)
5461 original_object = object;
5464 * The top object was locked "exclusive" by the caller.
5473 object = original_object;
5474 vm_object_lock_assert_exclusive(object);
5484 * There is a backing object, and
5487 backing_object = object->shadow;
5489 if (object != original_object) {
5490 vm_object_unlock(object);
5501 * No pages in the object are currently
5504 if (object->paging_in_progress != 0 ||
5505 object->activity_in_progress != 0) {
5507 if (object != original_object) {
5508 vm_object_unlock(object);
5510 object = backing_object;
5517 * The backing object is not read_only,
5518 * and no pages in the backing object are
5520 * The backing object is internal.
5528 if (object != original_object) {
5529 vm_object_unlock(object);
5531 object = backing_object;
5539 * any shadow objects or be a shadow object to another
5540 * object.
5541 * Collapsing a purgeable object would require some
5544 if (object->purgable != VM_PURGABLE_DENY ||
5547 "purgeable object: %p(%d) %p(%d)\n",
5548 object, object->purgable,
5551 if (object != original_object) {
5552 vm_object_unlock(object);
5554 object = backing_object;
5560 * The backing object can't be a copy-object:
5561 * the shadow_offset for the copy-object must stay
5564 * just shadow the next object in the chain, old
5565 * pages from that object would then have to be copied
5567 * parent object.
5572 if (object != original_object) {
5573 vm_object_unlock(object);
5575 object = backing_object;
5582 * object (if the parent is the only reference to
5587 * object, we may be able to collapse it into the
5602 !object->pager_created
5613 * We have an object and its shadow locked
5626 if (object != original_object)
5627 vm_object_unlock(object);
5635 backing_object, object,
5640 * Collapse the object with its backing
5641 * object, and try again with the object's
5642 * new backing object.
5645 vm_object_do_collapse(object, backing_object);
5651 * Collapsing the backing object was not possible
5657 if (object != original_object) {
5658 vm_object_unlock(object);
5660 object = backing_object;
5667 * If the object doesn't have all its pages present,
5668 * we have to make sure no pages in the backing object
5671 size = (unsigned int)atop(object->vo_size);
5672 rcount = object->resident_page_count;
5680 * If the backing object has a pager but no pagemap,
5690 if (object != original_object) {
5691 vm_object_unlock(object);
5693 object = backing_object;
5699 * If the object has a pager but no pagemap,
5703 if (object->pager_created
5705 && (object->existence_map == VM_EXTERNAL_NULL)
5709 if (object != original_object) {
5710 vm_object_unlock(object);
5712 object = backing_object;
5717 backing_offset = object->vo_shadow_offset;
5722 * we have enough pages in the backing object to guarantee that
5724 * in the object we're evaluating, so move on and
5727 if (object != original_object) {
5728 vm_object_unlock(object);
5730 object = backing_object;
5736 * If all of the pages in the backing object are
5737 * shadowed by the parent object, the parent
5738 * object no longer has to shadow the backing
5739 * object; it can shadow the next one in the
5742 * If the backing object has existence info,
5767 if (object->cow_hint != ~(vm_offset_t)0)
5768 hint_offset = (vm_object_offset_t)object->cow_hint;
5775 !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
5777 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
5779 if (object != original_object) {
5780 vm_object_unlock(object);
5782 object = backing_object;
5788 * If the object's window onto the backing_object
5790 * pages in the backing object, it makes sense to
5795 * walking the backing object's resident page list
5810 if (offset < object->vo_size &&
5812 !EXISTS_IN_OBJECT(object, offset, rc)) {
5814 object->cow_hint = (vm_offset_t) offset; /* atomic */
5823 if (object != original_object) {
5824 vm_object_unlock(object);
5826 object = backing_object;
5834 * backing object that show through to the object.
5844 (offset + PAGE_SIZE_64 < object->vo_size) ?
5849 !EXISTS_IN_OBJECT(object, offset, rcount)) {
5851 object->cow_hint = (vm_offset_t) offset; /* atomic */
5857 if (object != original_object) {
5858 vm_object_unlock(object);
5860 object = backing_object;
5872 if (object != original_object)
5873 vm_object_unlock(object);
5880 object->cow_hint = (vm_offset_t)0;
5883 * All interesting pages in the backing object
5885 * Thus we can bypass the backing object.
5888 vm_object_do_bypass(object, backing_object);
5892 * Try again with this object's new backing object.
5900 if (object != original_object) {
5901 vm_object_unlock(object);
5910 * object range from the object's list of pages.
5913 * The object must be locked.
5914 * The object must not have paging_in_progress, usually
5922 register vm_object_t object,
5934 if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
5938 p = vm_page_lookup(object, start);
5949 p = (vm_page_t) queue_first(&object->memq);
5950 while (!queue_end(&object->memq, (queue_entry_t) p)) {
5967 * regions of memory into a single object.
5971 * NOTE: Only works at the moment if the second object is NULL -
5972 * if it's not, which object do we lock first?
5975 * prev_object First object to coalesce
5977 * next_object Second object into coalesce
5984 * The object(s) must *not* be locked. The map must be locked
5985 * to preserve the reference to the object(s).
6019 * Try to collapse the object first
6028 * . shadows another object
6049 * Remove any pages that may still be in the object from
6057 * Extend the object if necessary.
6063 * We cannot extend an object that has existence info,
6065 * the entire object.
6067 * This assertion must be true because the object
6081 * Attach a set of physical pages to an object, so that they can
6082 * be mapped by mapping the object. Typically used to map IO memory.
6089 vm_object_t object,
6111 vm_object_lock(object);
6112 if ((old_page = vm_page_lookup(object, offset))
6126 vm_page_insert(m, object, offset);
6129 vm_object_unlock(object);
6135 vm_object_t object,
6144 if (!object->private)
6149 vm_object_lock(object);
6151 if (!object->phys_contiguous) {
6155 vm_object_unlock(object);
6158 base_offset += object->paging_offset;
6161 m = vm_page_lookup(object, base_offset);
6211 vm_page_insert(m, object, base_offset);
6226 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
6227 object->vo_size = size;
6229 vm_object_unlock(object);
6255 register vm_object_t object = VM_OBJECT_NULL;
6266 queue_iterate(&vm_object_cached_list, object,
6268 if (object->pager &&
6269 (pager_ops == object->pager->mo_pager_ops)) {
6270 vm_object_lock(object);
6271 queue_remove(&vm_object_cached_list, object,
6277 * Since this object is in the cache, we know
6283 assert(object->pager_initialized);
6284 assert(object->ref_count == 0);
6285 vm_object_lock_assert_exclusive(object);
6286 object->ref_count++;
6289 * Terminate the object.
6290 * If the object had a shadow, we let
6297 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
6299 if ((vm_object_terminate(object) == KERN_SUCCESS)
6325 vm_object_t object;
6337 (entry->object != VM_OBJECT_NULL)) {
6338 if (entry->object->named == TRUE)
6342 if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
6346 /* wait for object (if any) to be ready */
6347 if (object != VM_OBJECT_NULL) {
6348 vm_object_lock(object);
6349 object->named = TRUE;
6350 while (!object->pager_ready) {
6351 vm_object_sleep(object,
6355 *control = object->pager_control;
6356 vm_object_unlock(object);
6365 * Attempt to recover a named reference for a VM object.
6366 * VM will verify that the object has not already started
6370 * KERN_SUCCESS - we recovered a named reference on the object
6371 * KERN_FAILURE - we could not recover a reference (object dead)
6372 * KERN_INVALID_ARGUMENT - bad memory object control
6379 vm_object_t object;
6381 object = memory_object_control_to_vm_object(control);
6382 if (object == VM_OBJECT_NULL) {
6386 vm_object_lock(object);
6388 if (object->terminating && wait_on_terminating) {
6389 vm_object_wait(object,
6395 if (!object->alive) {
6396 vm_object_unlock(object);
6400 if (object->named == TRUE) {
6401 vm_object_unlock(object);
6405 if ((object->ref_count == 0) && (!object->terminating)) {
6407 vm_object_unlock(object);
6410 queue_remove(&vm_object_cached_list, object,
6415 object,
6422 object->named = TRUE;
6423 vm_object_lock_assert_exclusive(object);
6424 object->ref_count++;
6425 vm_object_res_reference(object);
6426 while (!object->pager_ready) {
6427 vm_object_sleep(object,
6431 vm_object_unlock(object);
6455 vm_object_t object,
6461 while (object != VM_OBJECT_NULL) {
6463 vm_object_lock(object);
6465 assert(object->alive);
6467 assert(object->named);
6468 assert(object->ref_count > 0);
6472 * destroying or caching the object.
6475 if (object->pager_created && !object->pager_initialized) {
6476 assert(!object->can_persist);
6477 vm_object_assert_wait(object,
6480 vm_object_unlock(object);
6485 if (((object->ref_count > 1)
6487 || (object->terminating)) {
6488 vm_object_unlock(object);
6492 vm_object_unlock(object);
6498 (object->ref_count == 1)) {
6500 object->named = FALSE;
6501 vm_object_unlock(object);
6504 vm_object_deallocate(object);
6507 VM_OBJ_RES_DECR(object);
6508 shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
6510 if (object->ref_count == 1) {
6511 if (vm_object_terminate(object) != KERN_SUCCESS) {
6520 object = shadow;
6525 vm_object_lock_assert_exclusive(object);
6526 object->ref_count--;
6527 assert(object->ref_count > 0);
6529 object->named = FALSE;
6530 vm_object_unlock(object);
6542 vm_object_t object,
6555 object, offset, size,
6561 if (object == VM_OBJECT_NULL)
6570 * Lock the object, and acquire a paging reference to
6573 vm_object_lock(object);
6574 vm_object_paging_begin(object);
6576 (void)vm_object_update(object,
6579 vm_object_paging_end(object);
6580 vm_object_unlock(object);
6586 * Empty a purgeable object by grabbing the physical pages assigned to it and
6591 * than happy to grab these since this is a purgeable object. We mark the
6592 * object as "empty" after reaping its pages.
6594 * On entry the object must be locked and it must be
6598 vm_object_purge(vm_object_t object, int flags)
6600 vm_object_lock_assert_exclusive(object);
6602 if (object->purgable == VM_PURGABLE_DENY)
6605 assert(object->copy == VM_OBJECT_NULL);
6606 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6609 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
6618 * vm_page_purgeable_count must have been updated when the object
6623 if (object->purgable == VM_PURGABLE_VOLATILE) {
6625 assert(object->resident_page_count >=
6626 object->wired_page_count);
6627 delta = (object->resident_page_count -
6628 object->wired_page_count);
6635 if (object->wired_page_count != 0) {
6637 object->wired_page_count);
6638 OSAddAtomic(-object->wired_page_count,
6641 object->purgable = VM_PURGABLE_EMPTY;
6643 assert(object->purgable == VM_PURGABLE_EMPTY);
6645 vm_object_reap_pages(object, REAP_PURGEABLE);
6647 if (object->pager != NULL &&
6651 if (object->activity_in_progress == 0 &&
6652 object->paging_in_progress == 0) {
6654 * Also reap any memory coming from this object
6657 * There are no operations in progress on the VM object
6659 * VM object lock, so it's safe to reap the compressed
6662 pgcount = vm_compressor_pager_get_count(object->pager);
6664 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
6665 vm_compressor_pager_count(object->pager,
6668 object);
6669 vm_purgeable_compressed_update(object,
6673 assert(vm_compressor_pager_get_count(object->pager)
6679 * for this object, which could result in a page
6681 * the VM object is not locked, so it could race
6687 * decompressing a page from a purgeable object
6692 * progress on the VM object.
6697 vm_object_lock_assert_exclusive(object);
6703 * state of a purgeable object. A purgeable object is created via a call to
6704 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
6705 * never be coalesced with any other object -- even other purgeable objects --
6706 * and will thus always remain a distinct object. A purgeable object has
6708 * count is greater than 1, then a purgeable object will behave like a normal
6709 * object and attempts to use this interface will result in an error return
6712 * A purgeable object may be put into a "volatile" state which will make the
6713 * object's pages eligible for being reclaimed without paging to backing
6715 * purgeable object are reclaimed, the purgeable object is said to have been
6716 * "emptied." When a purgeable object is emptied the system will reclaim as
6717 * many pages from the object as it can in a convenient manner (pages already
6719 * a purgeable object is made volatile, its pages will generally be reclaimed
6721 * generally used by applications which can recreate the data in the object
6725 * A purgeable object may be designated as "non-volatile" which means it will
6728 * object was emptied before the object was made non-volatile, that fact will
6729 * be returned as the old state of the purgeable object (see
6730 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
6731 * were reclaimed as part of emptying the object will be refaulted in as
6732 * zero-fill on demand. It is up to the application to note that an object
6734 * purgeable object is made non-volatile, its pages will generally not be paged
6735 * out to backing store in the immediate future. A purgeable object may also
6739 * volatile purgeable object may be queried at any time. This information may
6743 * The specified address may be any address within the purgeable object. If
6744 * the specified address does not represent any object in the target task's
6746 * object containing the specified address is not a purgeable object, then
6752 * state is used to set the new state of the purgeable object and return its
6754 * object is returned in the parameter state.
6759 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
6760 * immediately reclaim as many pages in the object as can be conveniently
6764 * The process of making a purgeable object non-volatile and determining its
6765 * previous state is atomic. Thus, if a purgeable object is made
6767 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
6768 * completely intact and will remain so until the object is made volatile
6769 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
6774 * The object must be locked.
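The comment above describes the user-visible purgeable API (vm_allocate() with VM_FLAGS_PURGABLE, state changes via purgable control). A minimal sketch of that usage, allocating a purgeable region, making it volatile, and later checking whether it was emptied:

#include <mach/mach.h>
#include <mach/vm_purgable.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	vm_address_t  addr = 0;
	vm_size_t     len = 16 * vm_page_size;
	int           state;
	kern_return_t kr;

	/* VM_FLAGS_PURGABLE gives this region its own purgeable VM object. */
	kr = vm_allocate(mach_task_self(), &addr, len,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS) return 1;

	memset((void *)addr, 0x42, len);	/* fill in the re-creatable data */

	/* Volatile: the kernel may reclaim these pages under memory pressure. */
	state = VM_PURGABLE_VOLATILE;
	vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);

	/* ... time passes; memory pressure may empty the object here ... */

	/* Non-volatile again; the returned old state says whether it was purged. */
	state = VM_PURGABLE_NONVOLATILE;
	vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);
	if (state == VM_PURGABLE_EMPTY)
		printf("contents were purged; recreate the data before use\n");

	vm_deallocate(mach_task_self(), addr, len);
	return 0;
}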
6778 vm_object_t object,
6785 if (object == VM_OBJECT_NULL) {
6792 vm_object_lock_assert_exclusive(object);
6795 * Get current state of the purgeable object.
6797 old_state = object->purgable;
6802 assert(object->copy == VM_OBJECT_NULL);
6803 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6814 object->volatile_empty = TRUE;
6817 object->volatile_fault = TRUE;
6822 object->volatile_empty) {
6829 object->purgable = new_state;
6834 assert(object->resident_page_count >=
6835 object->wired_page_count);
6836 delta = (object->resident_page_count -
6837 object->wired_page_count);
6845 if (object->wired_page_count != 0) {
6847 object->wired_page_count);
6848 OSAddAtomic(-object->wired_page_count,
6854 /* object should be on a queue */
6855 assert(object->objq.next != NULL &&
6856 object->objq.prev != NULL);
6860 * Move object from its volatile queue to the
6863 queue = vm_purgeable_object_remove(object);
6866 if (object->purgeable_when_ripe) {
6876 * Transfer the object's pages from the volatile to
6879 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
6886 if (object->volatile_fault) {
6890 queue_iterate(&object->memq, p, vm_page_t, listq) {
6905 object->resident_page_count == 0 &&
6906 object->pager == NULL)
6927 object->purgeable_when_ripe = FALSE;
6929 object->purgeable_when_ripe = TRUE;
6932 if (object->purgeable_when_ripe) {
6946 assert(object->resident_page_count >=
6947 object->wired_page_count);
6948 delta = (object->resident_page_count -
6949 object->wired_page_count);
6955 if (object->wired_page_count != 0) {
6956 OSAddAtomic(object->wired_page_count,
6960 object->purgable = new_state;
6962 /* object should be on "non-volatile" queue */
6963 assert(object->objq.next != NULL);
6964 assert(object->objq.prev != NULL);
6974 * object. If a new token is added, the most important object's priority is boosted.
6976 * It doesn't seem more biasing is necessary in this case, where no new object is added.
6978 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6980 old_queue = vm_purgeable_object_remove(object);
6992 object->purgeable_when_ripe)) {
6997 if (object->purgeable_when_ripe) {
7000 object->purgeable_when_ripe = purgeable_when_ripe;
7001 if (object->purgeable_when_ripe) {
7009 vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
7011 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
7021 if (object->volatile_fault) {
7025 queue_iterate(&object->memq, p, vm_page_t, listq) {
7049 /* object should be on a queue */
7050 assert(object->objq.next != NULL &&
7051 object->objq.prev != NULL);
7053 old_queue = vm_purgeable_object_remove(object);
7055 if (object->purgeable_when_ripe) {
7064 * This object's pages were previously accounted as
7068 vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
7076 object->purgable = VM_PURGABLE_EMPTY;
7079 (void) vm_object_purge(object, 0);
7080 assert(object->purgable == VM_PURGABLE_EMPTY);
7087 vm_object_lock_assert_exclusive(object);
7094 vm_object_t object,
7109 if (object == VM_OBJECT_NULL)
7117 vm_object_lock_assert_exclusive(object);
7128 * - the entire object is exactly covered by the request.
7130 if (offset == 0 && (object->vo_size == size)) {
7132 *resident_page_count = object->resident_page_count;
7137 if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
7139 queue_iterate(&object->memq, p, vm_page_t, listq) {
7158 p = vm_page_lookup(object, cur_offset);
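Pieced together, the page-counting strategy in these fragments picks whichever walk is cheaper; a simplified, self-contained sketch (count_pages_in_range() is a hypothetical helper, not this file's function) is:

/*
 * Count resident pages in [offset, offset + size): take the object-wide
 * counter when the request covers the whole object exactly, walk the
 * resident page list when it is shorter than the range, and otherwise
 * probe the page hash once per page offset in the range.
 */
static unsigned int
count_pages_in_range(vm_object_t object, vm_object_offset_t offset,
		     vm_object_size_t size)
{
	unsigned int		count = 0;
	vm_page_t		p;
	vm_object_offset_t	cur_offset;

	if (offset == 0 && size == object->vo_size)
		return object->resident_page_count;

	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
		queue_iterate(&object->memq, p, vm_page_t, listq) {
			if (p->offset >= offset && p->offset < offset + size)
				count++;
		}
	} else {
		for (cur_offset = offset;
		     cur_offset < offset + size;
		     cur_offset += PAGE_SIZE) {
			if (vm_page_lookup(object, cur_offset) != VM_PAGE_NULL)
				count++;
		}
	}
	return count;
}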
7194 * Called from vm_object_deallocate and when swapping out an object.
7196 * The object is locked, and remains locked throughout the function,
7198 * will be dropped, but not the original object.
7205 vm_object_t object)
7207 vm_object_t orig_object = object;
7210 * from vm_object_deallocate. Original object is never
7213 assert(object->res_count > 0);
7214 while (--object->res_count == 0) {
7215 assert(object->ref_count >= object->res_count);
7216 vm_object_deactivate_all_pages(object);
7218 if (object->shadow != VM_OBJECT_NULL) {
7219 vm_object_t tmp_object = object->shadow;
7221 if (object != orig_object)
7222 vm_object_unlock(object);
7223 object = tmp_object;
7224 assert(object->res_count > 0);
7228 if (object != orig_object)
7229 vm_object_unlock(object);
7235 * Internal function to increment residence count on a vm object
7237 * when swapping in a vm object, via vm_map_swap.
7239 * The object is locked, and remains locked throughout the function,
7241 * will be dropped, but not the original object.
7248 vm_object_t object)
7250 vm_object_t orig_object = object;
7255 while ((++object->res_count == 1) &&
7256 (object->shadow != VM_OBJECT_NULL)) {
7257 vm_object_t tmp_object = object->shadow;
7259 assert(object->ref_count >= object->res_count);
7261 if (object != orig_object)
7262 vm_object_unlock(object);
7263 object = tmp_object;
7265 if (object != orig_object)
7266 vm_object_unlock(object);
7274 * Gets another reference to the given object.
7281 register vm_object_t object)
7283 if (object == VM_OBJECT_NULL)
7286 vm_object_lock(object);
7287 assert(object->ref_count > 0);
7288 vm_object_reference_locked(object);
7289 vm_object_unlock(object);
7298 * have a memory object associated with them. Having this cache too
7368 * Allocate a temporary VM object to hold object1's contents
7377 * Grab control of the 1st VM object.
7391 * We're about to mess with the object's backing store and
7393 * to prevent any paging activity on this object, so the caller should
7401 * object locked, to guarantee that no one tries to access its pager.
7406 * Same as above for the 2nd object...
7442 * an intermediate object.
7453 * an intermediate object.
7493 /* "Lock" refers to the object not its contents */
7497 /* "ref_count" refers to the object not its contents */
7499 /* "res_count" refers to the object not its contents */
7524 /* "paging_in_progress" refers to the object not its contents */
7529 /* "all_wanted" refers to the object not its contents */
7545 /* "shadowed" refers to the the object not its contents */
7553 /* "shadow_severed" refers to the object not its contents */
7556 /* "cached_list.next" points to transposed object */
7562 /* "msr_q" is linked to the object not its contents */
7584 hash_entry->object = object2;
7591 hash_entry->object = object1;
7606 /* "uplq" refers to the object not its contents (see upl_transpose()) */
7624 * Re-initialize the temporary object to avoid
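The run of "refers to the object not its contents" annotations above marks the per-object fields that must stay put; everything else is exchanged through the temporary object. A sketch of that swap-through-temporary pattern (the macro name and the example fields are illustrative only):

/*
 * Exchange one field of object1 and object2 by way of the scratch object.
 * Identity fields -- the lock, ref_count, res_count, wait bits, queue
 * linkages, and so on -- are deliberately left out of this treatment.
 */
#define TRANSPOSE_FIELD(field)					\
do {								\
	tmp_object->field = object1->field;			\
	object1->field = object2->field;			\
	object2->field = tmp_object->field;			\
} while (0)

	/* e.g.:  TRANSPOSE_FIELD(vo_size);  TRANSPOSE_FIELD(pager);  */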
7668 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
7716 vm_object_lock(object);
7718 if (object->pager == MEMORY_OBJECT_NULL)
7719 goto out; /* pager is gone for this object, nothing more to do */
7722 vnode_pager_get_isSSD(object->pager, &isSSD);
7745 if (object->internal)
7746 object_size = object->vo_size;
7748 vnode_pager_get_object_size(object->pager, &object_size);
7760 if (object->pages_used > object->pages_created) {
7765 object->pages_used = object->pages_created = 0;
7767 if ((sequential_run = object->sequential)) {
7782 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
7796 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
7808 pre_heat_size = (max_length * object->pages_used) / object->pages_created;
7843 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
7934 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7942 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
7946 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7970 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7978 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7981 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7994 vm_object_unlock(object);
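The core of the cluster-sizing heuristic above scales the maximum read-ahead by the object's observed hit ratio, pages_used / pages_created, so that objects whose earlier pre-heating went unused get less speculative read-ahead. A stand-alone sketch of just that calculation (names and the one-page floor are illustrative):

#include <stdint.h>

/*
 * Scale the maximum read-ahead by the fraction of previously created
 * pages that were actually used; never return less than one page.
 */
static uint32_t
preheat_pages(uint32_t max_pages, uint32_t pages_used, uint32_t pages_created)
{
	uint32_t pre_heat;

	if (pages_created == 0)
		return max_pages;		/* no history yet */

	pre_heat = (uint32_t)(((uint64_t)max_pages * pages_used) / pages_created);

	return (pre_heat != 0) ? pre_heat : 1;
}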
8002 * the UPL regimen but takes place on the VM object rather than on a UPL
8007 vm_object_t object,
8015 vm_object_lock(object);
8018 if (object->phys_contiguous) {
8021 (object->vo_shadow_offset >> PAGE_SHIFT);
8023 vm_object_unlock(object);
8026 vm_object_unlock(object);
8030 if (object->phys_contiguous) {
8031 vm_object_unlock(object);
8036 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
8037 vm_object_unlock(object);
8047 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
8150 vm_object_unlock(object);
8167 vm_object_t object,
8180 if (object->resident_page_count == 0) {
8191 vm_object_lock(object);
8193 if (object->phys_contiguous) {
8194 vm_object_unlock(object);
8201 dst_page = vm_page_lookup(object, offset);
8209 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
8213 * it might even belong to a different object
8235 vm_object_unlock(object);
8256 memory_object_control_t object,
8273 pager_object = memory_object_control_to_vm_object(object);
8294 vm_object_lock(vm_object_t object)
8296 if (object == vm_pageout_scan_wants_object) {
8300 lck_rw_lock_exclusive(&object->Lock);
8304 vm_object_lock_avoid(vm_object_t object)
8306 if (object == vm_pageout_scan_wants_object) {
8314 _vm_object_lock_try(vm_object_t object)
8316 return (lck_rw_try_lock_exclusive(&object->Lock));
8320 vm_object_lock_try(vm_object_t object)
8325 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
8328 return _vm_object_lock_try(object);
8332 vm_object_lock_shared(vm_object_t object)
8334 if (vm_object_lock_avoid(object)) {
8337 lck_rw_lock_shared(&object->Lock);
8341 vm_object_lock_try_shared(vm_object_t object)
8343 if (vm_object_lock_avoid(object)) {
8346 return (lck_rw_try_lock_shared(&object->Lock));
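For callers, the practical upshot of the lock variants above is to prefer vm_object_lock_try() when stalling behind the pageout scan's target object would be harmful, falling back to the blocking vm_object_lock() only when sleeping is acceptable. A hypothetical caller-side helper illustrating the pattern:

/*
 * Try to take the object lock without sleeping; optionally fall back
 * to the blocking acquisition.  (Illustrative helper, not from this file.)
 */
static boolean_t
example_lock_object(vm_object_t object, boolean_t can_block)
{
	if (vm_object_lock_try(object))
		return TRUE;

	if (!can_block)
		return FALSE;

	vm_object_lock(object);		/* may sleep on the object's rw lock */
	return TRUE;
}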
8353 * The object must be locked
8356 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
8360 vm_object_lock_assert_exclusive(object);
8362 vm_object_paging_wait(object, THREAD_UNINT);
8364 queue_iterate(&object->memq, p, vm_page_t, listq) {
8370 object->set_cache_attr = FALSE;
8372 object->set_cache_attr = TRUE;
8374 object->wimg_bits = wimg_mode;
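Assembled from the fragments above, and assuming the per-page attribute update is pushed down to the pmap layer via pmap_set_cache_attributes() (an assumption, not quoted from this file), the flow of vm_object_change_wimg_mode() is roughly:

/*
 * Sketch: wait for paging activity to drain, update the cache attributes
 * of every resident page's physical mapping, then record the new mode on
 * the object so future pages inherit it.
 */
void
vm_object_change_wimg_mode_sketch(vm_object_t object, unsigned int wimg_mode)
{
	vm_page_t	p;

	vm_object_lock_assert_exclusive(object);

	vm_object_paging_wait(object, THREAD_UNINT);

	queue_iterate(&object->memq, p, vm_page_t, listq) {
		if (!p->fictitious)
			pmap_set_cache_attributes(p->phys_page, wimg_mode);	/* assumed pmap hook */
	}

	object->set_cache_attr = (wimg_mode != VM_WIMG_USE_DEFAULT);
	object->wimg_bits = wimg_mode;
}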
8409 /* object should be on a queue */
8425 * This object was "volatile" so its pages must have
8516 vm_object_t object)
8524 assert(object != VM_OBJECT_NULL );
8526 vm_object_lock(object);
8529 if (!object->pager_initialized) {
8531 * If there is no memory object for the page, create
8534 vm_object_pager_create(object);
8546 next = (vm_page_t)queue_first(&object->memq);
8548 while (!queue_end(&object->memq, (queue_entry_t)next)) {
8569 vm_object_unlock(object);
8573 vm_object_lock(object);
8614 vm_object_unlock(object);
8619 vm_object_t object)
8624 vm_object_lock(object);
8626 pager = object->pager;
8628 if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
8629 vm_object_unlock(object);
8633 vm_object_paging_wait(object, THREAD_UNINT);
8634 vm_object_paging_begin(object);
8636 object->blocked_access = TRUE;
8637 vm_object_unlock(object);
8641 vm_object_lock(object);
8643 object->blocked_access = FALSE;
8644 vm_object_paging_end(object);
8646 vm_object_unlock(object);
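The bracket shown above is a reusable shape: vm_object_paging_begin()/vm_object_paging_end() pin the object against termination while its lock is dropped, and blocked_access holds off new faults against it in the meantime. A sketch of the shape, with the pager interaction elided:

/*
 * Block access to the object, perform some pager-side work with the
 * object lock dropped, then unblock.  (Illustrative shape only.)
 */
static void
example_blocked_pager_op(vm_object_t object)
{
	vm_object_lock(object);

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_paging_begin(object);

	object->blocked_access = TRUE;
	vm_object_unlock(object);

	/* ... talk to object->pager without holding the object lock ... */

	vm_object_lock(object);
	object->blocked_access = FALSE;
	vm_object_paging_end(object);

	vm_object_unlock(object);
}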
8686 vm_object_t object;
8733 /* Get the VM object for this UPL */
8735 object = io_upl->map_object->shadow;
8737 object = io_upl->map_object;
8740 /* Get the dev vnode ptr for this object */
8741 if (!object || !object->pager ||
8742 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8801 Scan through all UPLs associated with the object to find the