Lines Matching refs:object (only in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/)

414  *		The object must be locked, and will be returned locked.
418 vm_object_t object)
423 * Deal with the deallocation (last reference) of a pageout object
425 * freeing pages in the original object.
428 assert(object->pageout);
429 shadow_object = object->shadow;
432 while (!queue_empty(&object->memq)) {
436 p = (vm_page_t) queue_first(&object->memq);
448 offset + object->shadow_offset);
490 * Revoke all access to the page. Since the object is
586 assert(object->ref_count == 0);
587 assert(object->paging_in_progress == 0);
588 assert(object->resident_page_count == 0);
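
The matches from 414 through 588 fall in vm_pageout_object_terminate(): a temporary pageout object is drained page by page back into the shadow object it was built over, and afterwards it must hold no references, no paging activity, and no resident pages. A minimal sketch of that drain loop, with the per-page reconciliation elided (an illustration consistent with these fragments, not the kernel's exact code):

    assert(object->pageout);
    shadow_object = object->shadow;

    while (!queue_empty(&object->memq)) {
        vm_page_t p = (vm_page_t) queue_first(&object->memq);

        /* the original page lives in the shadow object at the
         * pageout page's offset plus the shadow displacement */
        m = vm_page_lookup(shadow_object,
                           p->offset + object->shadow_offset);
        /* ... reconcile m's state with p, then free p ... */
    }
    assert(object->ref_count == 0);
    assert(object->paging_in_progress == 0);
    assert(object->resident_page_count == 0);
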
599 * The page must not be busy, and the object and page
617 (integer_t)m->object, m->offset, (integer_t)m,
650 * the appropriate memory object. This routine is used to push
651 * pages into a copy-object when they are modified in the
652 * permanent object.
654 * The page is moved to a temporary object and paged out.
658 * The object to which it belongs must be locked.
662 * Move this page to a completely new object.
668 vm_object_t object;
686 * Create a paging reference to let us play with the object.
688 object = m->object;
689 paging_offset = m->offset + object->paging_offset;
694 vm_object_unlock(object);
701 * never happen since this should be a copy object and therefore not
702 * an external object, so the pager should always be there.
705 pager = object->pager;
709 panic("missing pager for copy object");
714 vm_object_paging_begin(object);
725 vm_object_unlock(object);
729 * Note that the data is passed by naming the new object,
732 * [The object reference from its allocation is donated
737 vm_object_lock(object);
738 vm_object_paging_end(object);
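
The fragments at 650 through 738 describe pushing a modified page into a copy object through its pager. The recurring idiom is the paging-in-progress bracket: take a paging reference so the object survives, drop the object lock while calling out to the pager, then retake the lock and drop the reference. A hedged sketch of just that bracket (the pager call itself is elided):

    vm_object_lock(object);
    vm_object_paging_begin(object);    /* pin the object across the unlock */
    pager = object->pager;
    if (pager == MEMORY_OBJECT_NULL)
        panic("missing pager for copy object");
    vm_object_unlock(object);
    /* ... hand the page to the pager; exact call elided ... */
    vm_object_lock(object);
    vm_object_paging_end(object);
    vm_object_unlock(object);
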
758 * The page must be busy, and the object and queues locked. We will take a
760 * release the object lock back at the call site. The I/O thread
769 vm_object_t object = m->object;
774 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
775 (integer_t)object, m->offset, (integer_t)m, 0, 0);
785 * protect the object from collapse -
786 * locking in the object's paging_offset.
788 vm_object_paging_begin(object);
800 if (object->internal == TRUE)
833 assert(m->object != VM_OBJECT_NULL);
834 assert(m->object != kernel_object);
836 if (m->object->internal == TRUE)
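
Lines 758 through 836 are from vm_pageout_cluster(): the page is routed to one of two pageout queues depending on whether its object is internal (backed by the default, anonymous pager) or external (file backed). A sketch of the routing, assuming the queue names used elsewhere in this file:

    struct vm_pageout_queue *q;

    assert(m->object != VM_OBJECT_NULL);
    assert(m->object != kernel_object);

    if (m->object->internal == TRUE)
        q = &vm_pageout_queue_internal;   /* anonymous memory */
    else
        q = &vm_pageout_queue_external;   /* file-backed memory */
    /* ... enqueue m on q and wake the matching I/O thread ... */
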
890 vm_object_t object;
952 * and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
990 object = NULL;
1033 assert(m->object != kernel_object);
1039 * Try to lock object; since we've already got the
1042 * to allow the owner of the object lock a chance to
1044 * object in the same state as we work our way through
1046 * object are fairly typical on the inactive and active queues
1048 if (m->object != object) {
1049 if (object != NULL) {
1050 vm_object_unlock(object);
1051 object = NULL;
1054 if (!vm_object_lock_try_scan(m->object)) {
1067 * this is the next object we're going to be interested in
1071 vm_pageout_scan_wants_object = m->object;
1075 object = m->object;
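
The comment at 1039-1046 spells out the scan's lock discipline: the page-queues lock is already held, so the scan must never block on an object lock that another thread may be holding while it waits for the queues. A sketch of the pattern the matches at 1048-1075 trace out (control flow simplified to a loop continue):

    if (m->object != object) {
        if (object != NULL) {
            vm_object_unlock(object);   /* done with the previous object */
            object = NULL;
        }
        if (!vm_object_lock_try_scan(m->object)) {
            /*
             * Couldn't take the lock without blocking: advertise
             * the object we want so its owner can yield it,
             * requeue the page, and move on.
             */
            vm_pageout_scan_wants_object = m->object;
            continue;                   /* skip this page for now */
        }
        object = m->object;
    }

Consecutive pages on the inactive and active queues tend to belong to the same object, so the lock is usually carried from one iteration to the next rather than retaken.
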
1100 * Deactivate the page while holding the object
1116 if (object != NULL) {
1117 vm_object_unlock(object);
1118 object = NULL;
1154 if (object != NULL) {
1155 vm_object_unlock(object);
1156 object = NULL;
1210 if (object != NULL) {
1211 vm_object_unlock(object);
1212 object = NULL;
1341 if (object != NULL) {
1342 vm_object_unlock(object);
1343 object = NULL;
1450 assert(m->object != kernel_object);
1457 * with the same object... if so, we've
1460 if (m->object != object) {
1462 * the object associated with candidate page is
1466 if (object != NULL) {
1467 vm_object_unlock(object);
1468 object = NULL;
1472 * Try to lock object; since we've already got the
1475 * to allow the owner of the object lock a chance to
1477 * object in the same state as we work our way through
1479 * object are fairly typical on the inactive and active queues
1481 if (!vm_object_lock_try_scan(m->object)) {
1554 * this is the next object we're going to be interested in
1558 vm_pageout_scan_wants_object = m->object;
1568 object = m->object;
1584 if (!object->pager_initialized && object->pager_created) {
1649 /* If the object is empty, the page must be reclaimed even if dirty or used. */
1650 /* If the page belongs to a volatile object, we stick it back on. */
1651 if (object->copy == VM_OBJECT_NULL) {
1652 if(object->purgable == VM_PURGABLE_EMPTY && !m->cleaning) {
1667 if (object->purgable == VM_PURGABLE_VOLATILE) {
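
Lines 1649-1667 handle purgeable objects during the scan: a page in an emptied purgeable object is reclaimed no matter its dirty or reference state, since its contents are gone by definition, while pages of volatile objects are put back rather than paged out. A sketch, with the reclaim and requeue steps left as comments:

    if (object->copy == VM_OBJECT_NULL) {
        if (object->purgable == VM_PURGABLE_EMPTY && !m->cleaning) {
            /* object was purged: reclaim the page even if it
             * is dirty or recently used */
        } else if (object->purgable == VM_PURGABLE_VOLATILE) {
            /* contents may be purged at any moment; don't spend
             * pageout effort on it, stick it back on the queue */
        }
    }
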
1690 if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
1715 if (m->object->internal) {
1817 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1818 (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
1839 if (object->internal) {
1849 object->internal &&
1850 (object->purgable == VM_PURGABLE_DENY ||
1851 object->purgable == VM_PURGABLE_NONVOLATILE ||
1852 object->purgable == VM_PURGABLE_VOLATILE )) {
1946 if (object->internal) {
1968 if (object != NULL) {
1969 vm_object_unlock(object);
1970 object = NULL;
2043 * queues and object locks held
2050 if (m->object->internal == TRUE)
2062 vm_object_paging_end(m->object);
2081 vm_object_t object;
2121 object = m->object;
2123 vm_object_lock(object);
2125 if (!object->pager_initialized) {
2128 * If there is no memory object for the page, create
2132 if (!object->pager_initialized)
2133 vm_object_collapse(object,
2136 if (!object->pager_initialized)
2137 vm_object_pager_create(object);
2138 if (!object->pager_initialized) {
2140 * Still no pager for the object.
2162 vm_object_paging_end(object);
2163 vm_object_unlock(object);
2169 pager = object->pager;
2182 vm_object_paging_end(object);
2183 vm_object_unlock(object);
2188 vm_object_unlock(object);
2191 * already been taken on the object before it was added
2193 * keep the object from being terminated and/or the
2195 * completed... therefore no need to lock the object to
2202 m->offset + object->paging_offset,
2210 vm_object_lock(object);
2211 vm_object_paging_end(object);
2212 vm_object_unlock(object);
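
The matches at 2081 through 2212 come from the pageout I/O thread: before a page can be sent out, its object may still lack a pager, so the code first tries to collapse the object (so any pager is created on the surviving object) and only then creates one. A sketch of that ladder and the data return that follows; the collapse arguments and the tail of the data-return call are elided, and the call name is an assumption consistent with these fragments:

    vm_object_lock(object);
    if (!object->pager_initialized) {
        /* collapse first so the pager lands on the survivor */
        vm_object_collapse(object, /* ... */);
        if (!object->pager_initialized)
            vm_object_pager_create(object);
        if (!object->pager_initialized) {
            /* still no pager: can't page this out right now */
            vm_object_paging_end(object);
            vm_object_unlock(object);
            return;                      /* simplified */
        }
    }
    pager = object->pager;
    vm_object_unlock(object);
    /*
     * A paging reference was taken on the object before the page
     * was queued, which keeps the object from being terminated
     * while the I/O is in flight, so no object lock is needed
     * around the call itself.
     */
    memory_object_data_return(pager,
        m->offset + object->paging_offset, /* ... */);
    vm_object_lock(object);
    vm_object_paging_end(object);
    vm_object_unlock(object);
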
2481 vm_object_t object;
2484 object = upl->map_object->shadow;
2486 object = upl->map_object;
2488 vm_object_lock(object);
2489 queue_remove(&object->uplq, upl, upl_t, uplq);
2490 vm_object_unlock(object);
2495 * not a pageout object is inserted
2556 * between the vm_object and its backing memory object. The
2568 * in the original object, if a page list structure
2575 * the original object. If a page list structure
2581 * against a default pager backed object. Only the default
2591 vm_object_t object,
2623 if ( (!object->internal) && (object->paging_offset != 0) )
2624 panic("vm_object_upl_request: external object with non-zero paging offset\n");
2625 if (object->phys_contiguous)
2626 panic("vm_object_upl_request: contiguous object specified\n");
2665 upl->map_object = object;
2669 * No need to lock the new object: nobody else knows
2672 upl->map_object->shadow = object;
2677 upl->map_object->wimg_bits = object->wimg_bits;
2696 vm_object_lock(object);
2697 vm_object_paging_begin(object);
2703 upl->offset = offset + object->paging_offset;
2706 queue_enter(&object->uplq, upl, upl_t, uplq);
2709 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
2715 * make sure that the copy object has its own
2719 vm_object_update(object,
2731 * remember which copy object we synchronized with
2733 last_copy_object = object->copy;
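
Lines 2709-2733: when the caller declares it will modify the pages (UPL_WILL_MODIFY) and the object has a copy object, the current contents must be pushed to the copy first, and the code records which copy object it synchronized with so a newly inserted one can be detected later (see 3022-3067). A sketch, with the update range and flags elided:

    if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
        /* give the copy object its own private pages before we
         * hand these pages out for modification */
        vm_object_update(object, /* ... range and flags ... */);
        /* remember which copy object we synchronized with */
        last_copy_object = object->copy;
    }
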
2746 vm_object_unlock(object);
2753 * then tries for the object lock... to avoid what
2756 * if this object contains the majority of the pages resident
2761 * successfully acquire the object lock of any candidate page
2764 vm_object_unlock(object);
2769 if (vm_object_lock_try(object))
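
The comment at 2753-2761 names the lock-ordering concern: vm_pageout_scan takes the page-queues lock first and then only *tries* for the object lock, so a thread that already holds an object lock must not block on the queues, or the scan could starve whenever this object holds the majority of resident pages. The cure is to take the locks in the scan's order with a non-blocking retry. A sketch of the retry loop consistent with these fragments (the backoff call's argument is an assumption):

    int j;

    vm_object_unlock(object);
    for (j = 0; ; j++) {
        vm_page_lock_queues();
        if (vm_object_lock_try(object))
            break;                      /* got both, in scan order */
        vm_page_unlock_queues();
        mutex_pause(j);                 /* backoff grows with collisions */
    }
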
2779 if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
2866 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
2895 * the object lock...
2928 if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
2982 vm_external_state_set(object->existence_map, dst_page->offset);
3022 if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
3026 * The copy object has changed since we
3028 * Another copy object might have been
3029 * inserted while we released the object's
3032 * through that new object, we have to
3044 if (object->copy != VM_OBJECT_NULL) {
3049 object,
3065 * remember the copy object we synced with
3067 last_copy_object = object->copy;
3069 dst_page = vm_page_lookup(object, dst_offset);
3089 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3115 if (object->private) {
3155 vm_object_unlock(object);
3160 * then tries for the object lock... to avoid what
3163 * if this object contains the majority of the pages resident
3168 * successfully acquire the object lock of any candidate page
3174 if (vm_object_lock_try(object))
3183 vm_page_insert_internal(dst_page, object, dst_offset, TRUE);
3229 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
3237 * original object and its progeny
3354 * then tries for the object lock... to avoid what
3357 * if this object contains the majority of the pages resident
3362 * successfully acquire the object lock of any candidate page
3365 vm_object_unlock(object);
3369 if (vm_object_lock_try(object))
3398 vm_object_unlock(object);
3467 vm_object_t object,
3476 if (object->paging_offset > offset)
3479 assert(object->paging_in_progress);
3480 offset = offset - object->paging_offset;
3489 super_size = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
3495 size, object->paging_offset);
3500 * object size
3508 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
3557 if (entry->object.vm_object != VM_OBJECT_NULL) {
3558 if (entry->object.vm_object->private)
3561 if (entry->object.vm_object->phys_contiguous)
3568 if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
3573 * Create an object if necessary.
3575 if (entry->object.vm_object == VM_OBJECT_NULL) {
3576 entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
3586 vm_object_t object;
3599 &version, &object,
3608 vm_object_unlock(object);
3617 submap = entry->object.sub_map;
3632 if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
3633 local_object = entry->object.vm_object;
3640 if (entry->object.vm_object->shadow && entry->object.vm_object->copy) {
3658 local_object = entry->object.vm_object;
3678 if (entry->object.vm_object->private)
3683 if (entry->object.vm_object->phys_contiguous)
3686 local_object = entry->object.vm_object;
3742 vm_object_t object;
3755 object = upl->map_object;
3760 upl->map_object->shadow = object;
3764 upl->map_object->shadow_offset = upl->offset - object->paging_offset;
3765 upl->map_object->wimg_bits = object->wimg_bits;
3779 vm_object_lock(object);
3781 m = vm_page_lookup(object, offset);
3802 vm_object_unlock(object);
3815 * key depends on the VM page's "pager" object and
3863 cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
3943 vm_object_t object;
3987 object = upl->map_object;
3990 vm_object_lock(object);
3991 shadow_object = object->shadow;
3993 shadow_object = object;
4000 * then tries for the object lock... to avoid what
4003 * if this object contains the majority of the pages resident
4008 * successfully acquire the object lock of any candidate page
4024 * If the object is code-signed, do not let this UPL tell
4055 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
4062 m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
4225 if (m->object->internal) {
4336 * then tries for the object lock... to avoid what
4339 * if this object contains the majority of the pages resident
4344 * successfully acquire the object lock of any candidate page
4389 if (object == shadow_object) {
4391 * this is not a paging object
4394 * against this object
4400 * the map object... vm_pageout_object_terminate
4406 if (object != shadow_object)
4407 vm_object_unlock(object);
4427 vm_object_t object;
4460 object = upl->map_object;
4463 vm_object_lock(object);
4464 shadow_object = object->shadow;
4466 shadow_object = object;
4473 * then tries for the object lock... to avoid what
4476 * if this object contains the majority of the pages resident
4481 * successfully acquire the object lock of any candidate page
4511 if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
4517 m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
4585 vm_external_state_clr(m->object->existence_map, m->offset);
4607 * then tries for the object lock... to avoid what
4610 * if this object contains the majority of the pages resident
4615 * successfully acquire the object lock of any candidate page
4663 if (object == shadow_object) {
4665 * this is not a paging object
4668 * against this object
4674 * the map object... vm_pageout_object_terminate
4680 if (object != shadow_object)
4681 vm_object_unlock(object);
4714 vm_object_t object,
4750 if (object->phys_contiguous) {
4751 if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
4754 if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
4776 if (((size/page_size) > MAX_UPL_SIZE) && !object->phys_contiguous)
4783 if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
4787 if ((!object->internal) && (object->paging_offset != 0))
4788 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
4791 if (object->phys_contiguous)
4811 upl->map_object = object;
4814 vm_object_lock(object);
4815 vm_object_paging_begin(object);
4819 upl->offset = offset + object->paging_offset;
4821 if (object->phys_contiguous) {
4823 queue_enter(&object->uplq, upl, upl_t, uplq);
4826 vm_object_unlock(object);
4834 upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
4837 user_page_list[0].phys_addr = (offset + object->shadow_offset)>>PAGE_SHIFT;
4851 object->true_share = TRUE;
4853 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
4854 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
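
Lines 4851-4854: once pages are wired down for I/O their physical identity must not change, so the object is marked truly shared and its copy strategy is demoted. A short sketch, matching the fragments:

    object->true_share = TRUE;          /* contents are shared for real */
    if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

MEMORY_OBJECT_COPY_SYMMETRIC assumes the object is reachable through a single mapping; once its pages are handed out for direct I/O that assumption no longer holds, so the delayed-copy strategy is used instead.
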
4857 queue_enter(&object->uplq, upl, upl_t, uplq);
4882 dst_page = vm_page_lookup(object, dst_offset);
4913 result = vm_fault_page(object, dst_offset,
4932 local_object = top_page->object;
4934 if (top_page->object != dst_page->object) {
4947 vm_object_lock(object);
4948 vm_object_paging_begin(object);
4954 vm_object_lock(object);
4955 vm_object_paging_begin(object);
4960 vm_object_lock(object);
4961 vm_object_paging_begin(object);
4971 vm_object_lock(object);
4972 vm_object_paging_begin(object);
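
The repeated lock/paging_begin pairs at 4947-4972 are the retry arms of a vm_fault_page() loop: each failure path comes back with the object unlocked and the paging reference consumed, so both must be re-established before the next attempt. A loose sketch (fault arguments simplified, error handling elided):

    vm_fault_return_t result;

    do {
        result = vm_fault_page(object, dst_offset, /* ... */);
        if (result != VM_FAULT_SUCCESS) {
            /* handle VM_FAULT_RETRY, memory shortage, etc.,
             * then retake what vm_fault_page() dropped */
            vm_object_lock(object);
            vm_object_paging_begin(object);
        }
    } while (result != VM_FAULT_SUCCESS);
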
5007 * we mustn't drop the object lock... we don't
5027 vm_page_replace(low_page, object, dst_offset);
5038 * here, because we've never dropped the object lock
5109 vm_object_unlock(object);
5118 vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
5128 dst_page = vm_page_lookup(object, offset);
5139 vm_object_paging_end(object);
5140 vm_object_unlock(object);
5197 * Make each UPL point to the correct VM object, i.e. the
5198 * object holding the pages that the UPL refers to...
5304 map_entry->object.vm_object = kernel_object;
5317 * Maps part of a VM object's pages in the kernel
5321 * The VM object is locked. This lock will get
5323 * must make sure the VM object is kept alive
5333 vm_object_t object,
5376 * If we can afford to unlock the VM object,
5382 * We can't afford to unlock the VM object, so
5411 * Keep the VM object locked over the PMAP_ENTER
5420 ((int) page->object->wimg_bits &
5448 * Try and map the required range of the object
5452 vm_object_reference_locked(object); /* for the map entry */
5453 vm_object_unlock(object);
5460 object,
5469 vm_object_deallocate(object); /* for the map entry */
5470 vm_object_lock(object);
5479 vm_object_lock(object);
5481 * VM object must be kept locked from before PMAP_ENTER()
5492 page = vm_page_lookup(object, offset + page_map_offset);
5495 vm_object_unlock(object);
5501 vm_object_lock(object);
5508 cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
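
Lines 5317 through 5508 are vm_paging_map_object(), which maps a window of an object's pages into the kernel map. The object lock is held from before PMAP_ENTER() until after it returns, so the page cannot be recycled between lookup and mapping. A sketch of the per-page step; the macro's argument order is an assumption, and page_map_addr is a hypothetical name for the kernel virtual address:

    page = vm_page_lookup(object, offset + page_map_offset);
    if (page == VM_PAGE_NULL) {
        vm_object_unlock(object);
        /* ... fail, or wait for the page, then relock ... */
        vm_object_lock(object);
    }
    cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
    /* keep the VM object locked across the pmap insertion */
    PMAP_ENTER(kernel_pmap, page_map_addr, page,
               VM_PROT_DEFAULT, cache_attr, TRUE);  /* arg order assumed */
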
5528 * Unmaps part of a VM object's pages from the kernel
5531 * The VM object is locked. This lock will get
5536 vm_object_t object,
5552 if (object != VM_OBJECT_NULL) {
5553 vm_object_unlock(object);
5556 if (object != VM_OBJECT_NULL) {
5557 vm_object_lock(object);
5696 * The page's object is locked, but this lock will be released
5733 * Take a paging-in-progress reference to keep the object
5737 vm_object_paging_begin(page->object);
5748 page->object,
5776 encrypt_iv.vm.pager_object = page->object->pager;
5778 page->object->paging_offset + page->offset;
5804 vm_paging_unmap_object(page->object,
5823 vm_object_paging_end(page->object);
5835 * The page's VM object is locked but will be unlocked and relocked.
5858 * Take a paging-in-progress reference to keep the object
5862 vm_object_paging_begin(page->object);
5873 page->object,
5896 decrypt_iv.vm.pager_object = page->object->pager;
5898 page->object->paging_offset + page->offset;
5923 vm_paging_unmap_object(page->object,
5969 vm_object_paging_end(page->object);
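
The matches at 5696 through 5969 are vm_page_encrypt() and vm_page_decrypt(): the page's object is pinned with a paging reference, the page is temporarily mapped into the kernel, and the initialization vector is derived from the pager and the page's paging offset, so the same page always encrypts with the same IV. A sketch of the encrypt-side bracket, with the mapping helper's arguments elided:

    vm_object_paging_begin(page->object);     /* pin across the unlock */
    /* ... map the page into the kernel's address space ... */
    encrypt_iv.vm.pager_object  = page->object->pager;
    encrypt_iv.vm.paging_offset =
        page->object->paging_offset + page->offset;
    /* ... encrypt the page in place using that IV ... */
    vm_paging_unmap_object(page->object, /* kernel mapping, elided */);
    vm_object_paging_end(page->object);
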
6007 * Find the VM object that contains the actual pages.
6012 * The offset in the shadow object is actually also
6171 assert(m->object != kernel_object);
6186 assert(m->object != kernel_object);
6202 assert(m->object != kernel_object);
6224 assert(m->object != kernel_object);