Lines Matching refs:object (only in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/)

136 	vm_object_t 		object,
261 vm_object_t object,
269 vm_object_t object);
375 * by copying VM object references from one map to
378 * to a VM object region exists in any map when this strategy
379 * is used -- this means that shadow object creation can be
382 * the same region of a vm object, and hence cannot delay creating
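
The comment at source lines 375-382 explains that the symmetric copy strategy cannot delay shadow-object creation, because several maps may already reference the same region of the object when the copy is made. The following minimal sketch, in plain C with toy types (toy_object and toy_shadow are illustrative names, not XNU definitions), shows what creating a shadow at copy time amounts to: the copied mapping points at a fresh object whose backing is the original, so writes land in the shadow while untouched pages are still found underneath.

    /*
     * Toy model of shadow-object creation at copy time.  Illustrative
     * stand-ins only, not XNU structures.
     */
    #include <stdlib.h>

    struct toy_object {
        struct toy_object  *shadow;         /* backing object, or NULL      */
        unsigned long long  shadow_offset;  /* where this object sits in it */
        int                 ref_count;
    };

    /*
     * Symmetric copy: the copy cannot be deferred, so a fresh, empty
     * object is hung in front of the original and the copied map entry
     * points at it.
     */
    struct toy_object *
    toy_shadow(struct toy_object *orig, unsigned long long offset)
    {
        struct toy_object *shadow = calloc(1, sizeof(*shadow));

        if (shadow == NULL)
            return NULL;
        shadow->shadow        = orig;   /* original keeps its own reference */
        shadow->shadow_offset = offset;
        shadow->ref_count     = 1;
        return shadow;
    }
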
395 * Placeholder object for submap operations. This object is dropped
417 * This remaps the requested part of the object with an object backed by
440 /* lookup the protected VM object */
450 protected_object = map_entry->object.vm_object;
452 /* there should be a VM object here at this point */
458 * Lookup (and create if necessary) the protected memory object
459 * matching that VM object.
460 * If successful, this also grabs a reference on the memory object,
474 /* map this memory object in place of the current one */
492 * memory object.
1010 * If the entry is an object, we call vm_object_res_reference
1019 if (entry->object.vm_object != VM_OBJECT_NULL) {
1021 vm_map_t lmap = entry->object.sub_map;
1026 vm_object_t object = entry->object.vm_object;
1027 vm_object_lock(object);
1032 vm_object_res_reference(object);
1033 vm_object_unlock(object);
1071 * If the entry is an object, we call vm_object_res_deallocate
1080 if (entry->object.vm_object != VM_OBJECT_NULL) {
1082 vm_map_t lmap = entry->object.sub_map;
1087 vm_object_t object = entry->object.vm_object;
1088 vm_object_lock(object);
1095 vm_object_res_deallocate(object);
1096 vm_object_unlock(object);
1228 * If an entry is allocated, the object/offset fields
1351 new_entry->object.vm_object = VM_OBJECT_NULL;
1395 * Force pages from the specified object to be entered into
1397 * As soon as a page is not found in the object, the scan ends.
1410 register vm_object_t object,
1423 vm_object_lock(object);
1425 m = vm_page_lookup(object, offset);
1434 vm_object_unlock(object);
1440 printf("map: %p, addr: %llx, object: %p, offset: %llx\n",
1441 map, (unsigned long long)addr, object, (unsigned long long)offset);
1448 vm_object_unlock(object);
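
Source lines 1410-1448 belong to the routine described at lines 1395-1397: pages from the object are entered into the physical map until the first offset at which vm_page_lookup finds nothing. A hedged sketch of that loop shape, using a toy residency array in place of the real page lookup and omitting all locking:

    /*
     * Illustrative only: enter mappings for consecutive resident pages and
     * stop at the first page that is not resident.  resident[i] is a toy
     * stand-in for vm_page_lookup() succeeding at page i.
     */
    #include <stdbool.h>
    #include <stddef.h>

    size_t
    toy_pmap_enter_prefix(const bool *resident, size_t npages)
    {
        size_t entered = 0;

        while (entered < npages && resident[entered])
            entered++;      /* the real routine enters the page into the pmap here */
        return entered;     /* number of pages actually entered */
    }
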
1496 * the given memory object and offset into that object.
1510 vm_object_t object,
1565 if (object == VM_OBJECT_NULL) {
1616 (object != VM_OBJECT_NULL &&
1617 (object->size != size ||
1618 object->purgable == VM_PURGABLE_DENY))
1804 * Check if the same object is being mapped.
1807 if (entry->object.sub_map !=
1808 (vm_map_t) object) {
1813 if (entry->object.vm_object != object) {
1814 /* not the same VM object... */
1817 obj2 = entry->object.vm_object;
1820 (object == VM_OBJECT_NULL ||
1821 object->internal)) {
1865 * See whether we can avoid creating a new entry (and object) by
1869 * entities in order to implement their "volatile object"
1874 if (object == VM_OBJECT_NULL) {
1875 object = vm_object_allocate(size);
1876 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1877 object->purgable = VM_PURGABLE_NONVOLATILE;
1881 (object == VM_OBJECT_NULL) &&
1896 if (vm_object_coalesce(entry->object.vm_object,
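
Lines 1865-1896 show the attempt to avoid allocating a new entry and object for an anonymous allocation by coalescing with the object that already backs the previous entry. A rough sketch of the precondition, with simplified toy fields; the real vm_object_coalesce() checks considerably more (copy strategy, paging activity, and so on):

    #include <stdbool.h>

    struct toy_prev_entry {
        unsigned long long  vme_start, vme_end;
        void               *object;     /* NULL if nothing allocated yet */
        bool                internal;   /* anonymous memory */
    };

    /* Can the new [start, start+size) range be folded into prev's object? */
    bool
    toy_can_coalesce(const struct toy_prev_entry *prev,
                     unsigned long long start)
    {
        if (prev == NULL || prev->vme_end != start)
            return false;                        /* must be immediately adjacent */
        return prev->object == NULL || prev->internal;
        /* on success the previous object is simply grown to cover the new
         * range rather than allocating a fresh object */
    }
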
1931 if (object == VM_OBJECT_NULL &&
1939 object, offset, needs_copy,
1951 submap = (vm_map_t) object;
2004 if ((object != VM_OBJECT_NULL) &&
2015 object, offset, cur_protection);
2032 * memory object is being mapped. Some pagers need to keep
2034 * object, for example.
2043 * the memory object.
2048 object != VM_OBJECT_NULL &&
2049 object->named &&
2050 object->pager != MEMORY_OBJECT_NULL) {
2051 vm_object_lock(object);
2052 pager = object->pager;
2053 if (object->named &&
2055 assert(object->pager_ready);
2056 vm_object_mapping_wait(object, THREAD_UNINT);
2057 vm_object_mapping_begin(object);
2058 vm_object_unlock(object);
2063 vm_object_lock(object);
2064 vm_object_mapping_end(object);
2066 vm_object_unlock(object);
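
Lines 2032-2066 bracket the notification that a named memory object is being mapped: wait for any mapping change already in progress, mark the object as having a mapping in progress, drop the lock while the pager is told, then mark the notification finished. A toy, lock-free sketch of that ordering (illustrative names only; the real code holds the object lock and blocks in vm_object_mapping_wait):

    #include <stdbool.h>

    struct toy_mapped_object {
        bool mapping_in_progress;
    };

    void
    toy_notify_pager_of_map(struct toy_mapped_object *object)
    {
        while (object->mapping_in_progress) {
            /* vm_object_mapping_wait(): sleep until the other mapper is done */
        }
        object->mapping_in_progress = true;    /* vm_object_mapping_begin() */
        /* drop the object lock, notify the pager, then retake the lock */
        object->mapping_in_progress = false;   /* vm_object_mapping_end()   */
    }
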
2182 vm_object_t object;
2201 * Find the vm object (if any) corresponding to this port.
2204 object = VM_OBJECT_NULL;
2227 /* offset from beginning of named entry offset in object */
2285 object = vm_object_enter(named_entry->backing.pager,
2290 if (object == VM_OBJECT_NULL) {
2298 vm_object_lock(object);
2299 vm_object_reference_locked(object);
2300 named_entry->backing.object = object;
2304 wimg_mode = object->wimg_bits;
2320 /* wait for object (if any) to be ready */
2322 while (!object->pager_ready) {
2324 object,
2327 vm_object_lock(object);
2331 if (object->wimg_bits != wimg_mode) {
2334 vm_object_paging_wait(object, THREAD_UNINT);
2336 object->wimg_bits = wimg_mode;
2337 queue_iterate(&object->memq, p, vm_page_t, listq) {
2346 object->true_share = TRUE;
2347 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2348 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2349 vm_object_unlock(object);
2352 /* an already mapped object. If the object is */
2354 /* object cannot be mapped until it is ready */
2357 object = named_entry->backing.object;
2358 assert(object != VM_OBJECT_NULL);
2360 vm_object_reference(object);
2367 * Detected fake ip_kotype for a memory object. In
2369 * instead is just a raw memory object.
2372 object = vm_object_enter((memory_object_t)port,
2374 if (object == VM_OBJECT_NULL)
2377 /* wait for object (if any) to be ready */
2378 if (object != VM_OBJECT_NULL) {
2379 if (object == kernel_object) {
2380 printf("Warning: Attempt to map kernel object"
2384 vm_object_lock(object);
2385 while (!object->pager_ready) {
2386 vm_object_wait(object,
2389 vm_object_lock(object);
2391 vm_object_unlock(object);
2397 if (object != VM_OBJECT_NULL &&
2398 object->named &&
2399 object->pager != MEMORY_OBJECT_NULL &&
2400 object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
2407 * memory object is being mapped. Some pagers need to keep
2409 * object, for example.
2418 * memory object.
2422 vm_object_lock(object);
2423 pager = object->pager;
2424 if (object->named &&
2426 object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
2427 assert(object->pager_ready);
2428 vm_object_mapping_wait(object, THREAD_UNINT);
2429 vm_object_mapping_begin(object);
2430 vm_object_unlock(object);
2435 vm_object_lock(object);
2436 vm_object_mapping_end(object);
2438 vm_object_unlock(object);
2449 result = vm_object_copy_strategically(object, offset, size,
2468 new_object = object;
2479 * original object, as it won't be mapped.
2482 vm_object_deallocate(object);
2487 object = new_object;
2495 object, offset,
2499 vm_object_deallocate(object);
2572 * Insert pages into object.
2590 * to enter it in a new VM object while encrypted.
2603 * Hang onto a reference on the object in case a
2606 * which we will insert this object.
2608 * Unfortunately, we must insert the object now before
2616 * Insert object into map.
2634 * A CPM object doesn't have can_persist set,
2752 assert(entry->object.sub_map != NULL);
2782 entry->object.sub_map,
2817 if (entry->object.vm_object &&
2819 entry->object.vm_object->phys_contiguous) {
2866 vm_map_reference(new_entry->object.sub_map);
2868 vm_object_reference(new_entry->object.vm_object);
2909 if (entry->object.vm_object &&
2911 entry->object.vm_object->phys_contiguous) {
2954 vm_map_reference(new_entry->object.sub_map);
2956 vm_object_reference(new_entry->object.vm_object);
3069 register vm_object_t object;
3089 ((object = entry->object.vm_object) == vm_submap_object) &&
3090 (object->resident_page_count == 0) &&
3091 (object->copy == VM_OBJECT_NULL) &&
3092 (object->shadow == VM_OBJECT_NULL) &&
3093 (!object->pager_created)) {
3095 entry->object.vm_object = VM_OBJECT_NULL;
3096 vm_object_deallocate(object);
3098 entry->object.sub_map = submap;
3113 (entry->object.sub_map)->pmap,
3293 pmap_protect(current->object.sub_map->pmap,
3655 vm_object_t object;
3664 pmap = entry->object.sub_map->pmap;
3702 &version, &object,
3716 vm_object_unlock(object);
3755 rc = vm_map_wire_nested(entry->object.sub_map,
3827 * lock on the map: create a shadow object for a
3828 * copy-on-write region, or an object for a zero-fill
3838 vm_object_shadow(&entry->object.vm_object,
3841 } else if (entry->object.vm_object == VM_OBJECT_NULL) {
3842 entry->object.vm_object = vm_object_allocate(size);
4126 pmap = entry->object.sub_map->pmap;
4172 vm_map_unwire_nested(entry->object.sub_map,
4210 vm_map_unwire_nested(entry->object.sub_map,
4326 * prevent VM object shadow chain collapsing, which can cause
4364 register vm_object_t object;
4375 object = NULL;
4376 submap = entry->object.sub_map;
4379 object = entry->object.vm_object;
4389 * Deallocate the object only after removing all
4395 vm_object_deallocate(object);
4430 entry->object.sub_map,
4435 && (entry->object.vm_object != NULL)) {
4437 entry->object.vm_object,
4464 entry->object.sub_map,
4468 && (entry->object.vm_object != NULL)) {
4470 entry->object.vm_object,
4511 register vm_object_t object;
4735 sub_map = tmp_entry.object.sub_map;
4753 tmp_entry.object.vm_object == kernel_object,
4827 entry->object.sub_map,
4833 entry->object.sub_map,
4836 } else if (entry->object.vm_object != kernel_object) {
4837 object = entry->object.vm_object;
4840 object, entry->offset,
4959 * Dispose of a map copy object (returned by
4980 vm_object_deallocate(entry->object.vm_object);
5004 * Move the information in a map copy object to
5005 * a new map copy object, leaving the old one
5012 * copy object will be deallocated; therefore,
5014 * object and leave the original empty so that
5027 * Allocate a new copy object, and copy the information
5037 * changed to point to the new copy object.
5046 * Change the old copy object into one that contains
5053 * Return the new object.
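
The comment at lines 5004-5053 describes moving the contents of one map copy object into a new one and leaving the original empty, so that a later discard of the original does nothing. Assuming the entry list is an intrusive circular list whose sentinel lives inside the header (a toy simplification), the move looks roughly like this:

    #include <stdlib.h>

    struct toy_link { struct toy_link *prev, *next; };
    struct toy_copy { struct toy_link  entries; /* list sentinel */ };

    struct toy_copy *
    toy_copy_move(struct toy_copy *old)
    {
        struct toy_copy *fresh = malloc(sizeof(*fresh));

        if (fresh == NULL)
            return NULL;
        *fresh = *old;                                  /* take the contents  */
        if (old->entries.next == &old->entries) {       /* list was empty     */
            fresh->entries.prev = fresh->entries.next = &fresh->entries;
        } else {                                        /* repoint neighbours */
            fresh->entries.next->prev = &fresh->entries;
            fresh->entries.prev->next = &fresh->entries;
        }
        /* leave the original empty so discarding it later is harmless */
        old->entries.prev = old->entries.next = &old->entries;
        return fresh;
    }
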
5123 entry->object.sub_map,
5181 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
5182 ((!entry->object.vm_object->internal) ||
5183 (entry->object.vm_object->true_share))) {
5202 * object (copy; returned by vm_map_copyin) onto
5209 * memory object, it is preserved.
5214 * If successful, consumes the copy object.
5269 * Check for null copy object.
5369 entry->object.sub_map,
5426 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
5427 ((!entry->object.vm_object->internal) ||
5428 (entry->object.vm_object->true_share))) {
5471 /* deconstruct the copy object and do in parts */
5533 entry->object.sub_map);
5534 entry->object.sub_map = NULL;
5575 /* adjust the copy object */
5623 entry->object.sub_map,
5627 entry->object.sub_map->pmap);
5630 entry->object.sub_map,
5636 entry->object.sub_map,
5706 /* adjust the copy object */
5831 * Throw away the vm_map_copy object
5861 * within 1 memory object so we have to find the smaller of "amount left"
5862 * "source object data size" and "target object data size". With
5864 * (copy) object should be one map entry, the target range may be split
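
Lines 5854-5864 note that an unaligned overwrite can only proceed one chunk at a time, bounded by the amount left to copy, the data remaining in the current source (copy) entry, and the data remaining in the current target entry. The arithmetic is simply the minimum of the three, as in this illustrative helper:

    #define TOY_MIN(a, b)  ((a) < (b) ? (a) : (b))

    unsigned long long
    toy_chunk_size(unsigned long long amount_left,
                   unsigned long long src_entry_remaining,
                   unsigned long long dst_entry_remaining)
    {
        return TOY_MIN(amount_left,
                       TOY_MIN(src_entry_remaining, dst_entry_remaining));
    }
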
5936 * Entry needs copy, create a shadow object for
5946 vm_object_shadow(&entry->object.vm_object,
5953 dst_object = entry->object.vm_object;
5956 * to fault on it therefore we need a target object.
5965 entry->object.vm_object = dst_object;
5970 * Take an object reference and unlock map. The "entry" may
5982 copy_entry->object.vm_object,
5995 * Release the object reference
6011 vm_object_deallocate(copy_entry->object.vm_object);
6090 vm_object_t object;
6160 object = entry->object.vm_object;
6162 ((object == VM_OBJECT_NULL) ||
6163 (object->internal && !object->true_share))) ||
6165 vm_object_t old_object = entry->object.vm_object;
6173 if (old_object == copy_entry->object.vm_object &&
6200 entry->object.sub_map,
6207 entry->object.sub_map,
6211 entry->object.sub_map);
6215 entry->object.vm_object,
6232 entry->object = copy_entry->object;
6233 object = entry->object.vm_object;
6263 vm_object_t dst_object = entry->object.vm_object;
6268 * Take an object reference, and record
6286 copy_entry->object.vm_object,
6296 * Release the object reference
6316 vm_object_deallocate(copy_entry->object.vm_object);
6357 * If successful, returns a new copy object.
6406 * If successful, consumes the copy object.
6530 * If successful, consumes the copy object.
6548 * Check for null copy object.
6557 * Check for special copy object, created
6562 vm_object_t object = copy->cpy_object;
6572 object, offset, FALSE,
6577 /* Account for non-pagealigned copy object */
6706 register vm_object_t object;
6710 object = entry->object.vm_object;
6723 * Look up the page in the object.
6725 * top object:
6727 * the object was newly created by
6730 * the source object
6732 * the object was moved from the old
6735 * were in the top-level object.
6739 vm_object_lock(object);
6741 m = vm_page_lookup(object, offset);
6767 vm_object_unlock(object);
6835 * A vm_map_copy_t object (copy_result), suitable for
7021 src_map = tmp_entry->object.sub_map;
7037 if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
7038 (tmp_entry->object.vm_object->phys_contiguous)) {
7043 /* based object. We can piggy-back off of */
7087 src_object = src_entry->object.vm_object;
7103 * If we are destroying the source, and the object
7104 * is internal, we can move the object reference
7107 * We make another reference to the object, because
7123 src_object, new_entry, new_entry->object.vm_object,
7128 &new_entry->object.vm_object,
7171 * Take an object reference, so that we may
7198 &new_entry->object.vm_object);
7216 new_entry->object.vm_object = new_object;
7224 &new_entry->object.vm_object,
7257 * same object/offset are still present.
7280 if ((src_entry->object.vm_object != src_object) ||
7291 vm_object_deallocate(new_entry->object.vm_object);
7391 /* when the various entries in the copy object were picked */
7412 * Create a copy object from an object.
7413 * Our caller donates an object reference.
7418 vm_object_t object,
7419 vm_object_offset_t offset, /* offset of region in object */
7420 vm_object_size_t size, /* size of region in object */
7426 * We drop the object into a special copy object
7427 * that contains the object directly.
7432 copy->cpy_object = object;
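
Lines 7412-7432 create a copy object from a donated object reference by parking the object directly in a special copy header rather than building an entry list. A toy tagged-header sketch of that idea (names are illustrative, not the XNU definitions):

    typedef enum { TOY_COPY_ENTRY_LIST, TOY_COPY_OBJECT } toy_copy_type_t;

    struct toy_map_copy {
        toy_copy_type_t     type;
        unsigned long long  offset;       /* offset of region in object */
        unsigned long long  size;         /* size of region in object   */
        void               *cpy_object;   /* valid when type == TOY_COPY_OBJECT */
    };

    /* Caller donates its object reference; we just store it in the header. */
    void
    toy_copy_wrap_object(struct toy_map_copy *copy, void *object,
                         unsigned long long offset, unsigned long long size)
    {
        copy->type       = TOY_COPY_OBJECT;
        copy->cpy_object = object;
        copy->offset     = offset;
        copy->size       = size;
    }
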
7446 vm_object_t object;
7451 * references original object. Internal
7454 * the right object. If we need a shadow,
7459 object = old_entry->object.vm_object;
7467 (old_entry->object.sub_map)->pmap,
7475 } else if (object == VM_OBJECT_NULL) {
7476 object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end -
7479 old_entry->object.vm_object = object;
7481 } else if (object->copy_strategy !=
7487 * the right object.
7493 object->shadowed || /* case 2 */
7494 (!object->true_share && /* case 3 */
7496 (object->size >
7509 * object do not interfere with
7515 * deferred shadow object creation
7519 * object, and we are about to create
7521 * same object. The problem is that
7523 * an object to the entries pointing
7527 * entry to the object when handling
7530 * The second case is when the object
7533 * directly to the object without
7536 * of an object can be pointed to by
7538 * a single entry pointing to an object
7543 * The shadowed flag in the object allows
7545 * with this case is that if this object
7548 * of this object, since such a copy
7549 * allows the object to be changed, which
7551 * copies (which rely upon the object
7553 * flag says "don't change this object".
7555 * object for this object, and sharing
7557 * to change the shadow object (and thus
7560 * since this object is temporary, and
7561 * therefore a copy of the object is
7562 * as good as the object itself. (This
7568 * The third case is when the object
7577 vm_object_shadow(&old_entry->object.vm_object,
7599 old_entry->object.vm_object,
7615 object = old_entry->object.vm_object;
7619 * If object was using a symmetric copy strategy,
7628 vm_map_lock(old_entry->object.sub_map);
7629 vm_map_reference(old_entry->object.sub_map);
7630 vm_map_unlock(old_entry->object.sub_map);
7632 vm_object_lock(object);
7633 vm_object_reference_locked(object);
7634 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
7635 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
7637 vm_object_unlock(object);
7641 * Clone the entry, using object ref from above.
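
The comment block at lines 7493-7568 lays out when a symmetric-copy object must be shadowed before an entry can be shared at fork time (the entry still needs a copy, the object is already shadowed, or it is a temporary object larger than the entry), and lines 7632-7635 show the alternative: take a reference and switch the copy strategy to delay. A condensed, toy version of that decision (illustrative fields only; other real checks such as is_shared are omitted):

    #include <stdbool.h>

    enum toy_strategy { TOY_COPY_SYMMETRIC, TOY_COPY_DELAY };

    struct toy_fork_object {
        enum toy_strategy   copy_strategy;
        bool                shadowed;
        bool                true_share;
        unsigned long long  size;
    };

    bool
    toy_needs_shadow_before_share(const struct toy_fork_object *o,
                                  bool entry_needs_copy,
                                  unsigned long long entry_size)
    {
        if (o->copy_strategy != TOY_COPY_SYMMETRIC)
            return false;
        return entry_needs_copy                     /* case 1 */
            || o->shadowed                          /* case 2 */
            || (!o->true_share &&                   /* case 3 */
                o->size > entry_size);
    }
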
7811 ((old_entry->object.vm_object != NULL) &&
7812 (old_entry->object.vm_object->true_share))) {
7822 &new_entry->object.vm_object,
7845 old_entry->object.vm_object,
7914 * Finds the VM object, offset, and
7919 * Returns the (object, offset, protection) for
7927 * existence of the returned object, it is returned
7942 vm_object_t *object, /* OUT */
8011 *real_map = entry->object.sub_map;
8019 if(*real_map == entry->object.sub_map)
8023 vm_map_lock_read(entry->object.sub_map);
8025 /* reset base to map before cow object */
8027 /* the new cow object */
8033 vm_map_lock_read(entry->object.sub_map);
8039 vm_map_lock_read(entry->object.sub_map);
8042 /* follow the maps down to the object */
8050 *var_map = map = entry->object.sub_map;
8068 /* find the attenuated shadow of the underlying object */
8071 /* in English, the submap object may extend beyond the */
8073 /* of it. For our purposes, we only care if the object */
8113 sub_object = submap_entry->object.vm_object;
8120 submap_entry->object.vm_object = sub_object;
8134 /* object in the submap, bypassing the */
8151 /* set up shadow object */
8186 /* new copy object, */
8207 /* object */
8218 * with a possibly different VM object and/or offset.
8236 /* substitute copy object for */
8238 vm_map_deallocate(entry->object.sub_map);
8240 entry->object.vm_object = copy_object;
8324 * Make a new object, and place it in the
8325 * object chain. Note that no new references
8327 * map to the new object.
8334 vm_object_shadow(&entry->object.vm_object,
8339 entry->object.vm_object->shadowed = TRUE;
8354 * Create an object if necessary.
8356 if (entry->object.vm_object == VM_OBJECT_NULL) {
8363 entry->object.vm_object = vm_object_allocate(
8370 * Return the object/offset from this entry. If the entry
8376 *object = entry->object.vm_object;
8391 * Lock the object to prevent it from disappearing
8394 vm_object_lock(*object);
8396 vm_object_lock_shared(*object);
8624 vm_map_lock_read(curr_entry->object.sub_map);
8645 curr_map = curr_entry->object.sub_map;
8701 submap_info->object_id = (uint32_t) curr_entry->object.vm_object;
8711 short_info->object_id = (uint32_t) curr_entry->object.vm_object;
8743 curr_entry->object.sub_map->ref_count;
8987 if (entry->object.vm_object == 0 || entry->is_sub_map) {
9001 obj = entry->object.vm_object;
9066 if ((entry->object.vm_object == 0) ||
9068 (entry->object.vm_object->phys_contiguous)) {
9074 obj = entry->object.vm_object;
9142 obj = entry->object.vm_object;
9160 /* object is locked on entry and locked on return */
9167 vm_object_t object,
9180 shadow = object->shadow;
9181 caller_object = object;
9186 if ( !(object->pager_trusted) && !(object->internal))
9189 if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
9199 if(object != caller_object)
9200 vm_object_unlock(object);
9205 if (object->existence_map) {
9206 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {
9210 if(object != caller_object)
9211 vm_object_unlock(object);
9215 } else if (object->internal &&
9216 object->alive &&
9217 !object->terminating &&
9218 object->pager_ready) {
9222 vm_object_paging_begin(object);
9223 pager = object->pager;
9224 vm_object_unlock(object);
9228 offset + object->paging_offset,
9233 vm_object_lock(object);
9234 vm_object_paging_end(object);
9239 if (object != caller_object)
9240 vm_object_unlock(object);
9258 if(object != caller_object)
9259 vm_object_unlock(object);
9261 offset = offset + object->shadow_offset;
9262 object = shadow;
9263 shadow = object->shadow;
9266 if(object != caller_object)
9267 vm_object_unlock(object);
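
Lines 9160-9267 walk the shadow chain looking for where a page actually lives: if the page is not resident in the current object (and the existence map or pager cannot account for it), the walk descends to the shadow with the offset adjusted by shadow_offset. A toy sketch of the descent, with a stand-in residency callback in place of vm_page_lookup:

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_shadow_obj {
        struct toy_shadow_obj *shadow;
        unsigned long long     shadow_offset;
        /* toy stand-in for vm_page_lookup(): is this offset resident here? */
        bool (*resident)(const struct toy_shadow_obj *, unsigned long long);
    };

    const struct toy_shadow_obj *
    toy_find_page(const struct toy_shadow_obj *object, unsigned long long offset)
    {
        while (object != NULL) {
            if (object->resident(object, offset))
                return object;                 /* page lives in this object */
            offset += object->shadow_offset;   /* translate into the shadow */
            object  = object->shadow;
        }
        return NULL;                           /* absent in the whole chain */
    }
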
9275 vm_object_t object)
9281 if (entry->object.vm_object == 0)
9289 chk_obj = entry->object.vm_object;
9293 if (chk_obj == object)
9338 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
9367 vm_map_deallocate(prev_entry->object.sub_map);
9369 vm_object_deallocate(prev_entry->object.vm_object);
9496 entry->object.sub_map,
9501 if(entry->object.vm_object) {
9503 vm_object_t object;
9513 object = entry->object.vm_object;
9514 base_object = object;
9517 vm_object_lock(object);
9521 object, offset);
9530 } else if (object->shadow) {
9531 offset = offset + object->shadow_offset;
9532 last_object = object;
9533 object = object->shadow;
9540 if (base_object != object) {
9541 vm_object_unlock(object);
9543 object = base_object;
9549 vm_object_unlock(object);
9698 shadows = vm_follow_object(entry->object.vm_object);
9739 entry->object.sub_map,
9742 iprintf("object = %08X offset = %016llX - ",
9743 entry->object.vm_object,
9836 * Pretty-print a copy object for ddb.
9849 printf("copy object 0x%x\n", copy);
9860 printf("[object]");
9885 iprintf("object=0x%x\n", copy->cpy_object);
9937 vm_object_t object,
9960 new_entry->object.vm_object = object;
10019 vm_object_t object;
10086 vm_map_reference(src_entry->object.sub_map);
10087 object = VM_OBJECT_NULL;
10089 object = src_entry->object.vm_object;
10091 if (object == VM_OBJECT_NULL) {
10092 object = vm_object_allocate(entry_size);
10094 src_entry->object.vm_object = object;
10095 } else if (object->copy_strategy !=
10100 * the right object.
10103 } else if (src_entry->needs_copy || object->shadowed ||
10104 (object->internal && !object->true_share &&
10106 object->size > entry_size)) {
10108 vm_object_shadow(&src_entry->object.vm_object,
10123 src_entry->object.vm_object,
10137 object = src_entry->object.vm_object;
10142 vm_object_lock(object);
10143 vm_object_reference_locked(object); /* object ref. for new entry */
10144 if (object->copy_strategy ==
10146 object->copy_strategy =
10149 vm_object_unlock(object);
10176 object = VM_OBJECT_NULL;
10178 vm_object_copy_quickly(&new_entry->object.vm_object,
10199 vm_object_pmap_protect(object,
10211 * Throw away the old object reference of the new entry.
10213 vm_object_deallocate(object);
10220 * already hold a reference on the object.
10232 vm_object_lock(object);
10234 object,
10238 &new_entry->object.vm_object);
10244 object,
10247 &new_entry->object.vm_object,
10255 * Throw away the old object reference of the new entry.
10257 vm_object_deallocate(object);
10276 * same object/offset are still present.
10279 object.vm_object);
10287 vm_object_reference(object);
10314 vm_object_deallocate(src_entry->object.vm_object);
10326 * one vm memory object. Protections and
10401 vm_object_deallocate(entry->object.vm_object);
10403 vm_map_deallocate(entry->object.sub_map);
10766 vm_object_t object;
10771 * underlying object. Return with an error if anything is amiss.
10804 object = entry->object.vm_object;
10805 if (object == VM_OBJECT_NULL) {
10813 vm_object_lock(object);
10816 entry->vme_end - entry->vme_start != object->size) {
10819 * object at once.
10822 vm_object_unlock(object);
10828 kr = vm_object_purgable_control(object, control, state);
10830 vm_object_unlock(object);
10843 vm_object_t object;
10860 offset += map_entry->offset; /* adjust to target object offset */
10862 if (map_entry->object.vm_object != VM_OBJECT_NULL) {
10864 object = map_entry->object.vm_object;
10868 sub_map = map_entry->object.sub_map;
10879 vm_object_lock(object);
10883 m = vm_page_lookup(object, offset);
10890 if (object->existence_map) {
10891 if (vm_external_state_get(object->existence_map, offset)
10901 if (object->internal &&
10902 object->alive &&
10903 !object->terminating &&
10904 object->pager_ready) {
10908 vm_object_paging_begin(object);
10909 pager = object->pager;
10910 vm_object_unlock(object);
10914 offset + object->paging_offset,
10919 vm_object_lock(object);
10920 vm_object_paging_end(object);
10930 if (object->shadow != VM_OBJECT_NULL) {
10933 offset += object->shadow_offset;
10934 shadow = object->shadow;
10937 vm_object_unlock(object);
10939 object = shadow;
10942 if (!object->internal)
10951 /* of entities holding a ref on the object, they may not be mapping */
10952 /* the object or may not be mapping the section holding the */
10960 *ref_count = object->ref_count;
10962 if (top_object == TRUE && object->shadow)
10987 vm_object_unlock(object);
10997 * memory manager engaging in a memory object synchronize dialog with
11020 * The memory object attributes have not yet been implemented, this
11069 vm_object_t object;
11131 local_map = entry->object.sub_map;
11143 object = entry->object.vm_object;
11146 * We can't sync this object if the object has not been
11149 if (object == VM_OBJECT_NULL) {
11157 vm_object_lock(object);
11163 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
11169 vm_object_deactivate_pages(object, offset,
11171 vm_object_unlock(object);
11176 * We can't sync this object if there isn't a pager.
11180 if ((object->pager == MEMORY_OBJECT_NULL) ||
11181 (object->internal) || (object->private)) {
11182 vm_object_unlock(object);
11187 * keep reference on the object until syncing is done
11189 vm_object_reference_locked(object);
11190 vm_object_unlock(object);
11194 do_sync_req = vm_object_sync(object,
11207 if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) {
11211 vm_object_lock(object);
11213 object->pages_created = 0;
11214 object->pages_used = 0;
11215 object->sequential = 0;
11216 object->last_alloc = 0;
11218 vm_object_unlock(object);
11220 vm_object_deallocate(object);
11225 vm_object_lock(object);
11226 offset += object->paging_offset;
11230 new_msr->object = object;
11235 * We can't sync this object if there isn't a pager. The
11236 * pager can disappear anytime we're not holding the object
11240 pager = object->pager;
11243 vm_object_unlock(object);
11244 vm_object_deallocate(object);
11248 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
11262 vm_object_unlock(object);
11264 vm_object_lock(object);
11270 queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q);
11272 vm_object_paging_begin(object);
11273 vm_object_unlock(object);
11283 vm_object_lock(object);
11284 vm_object_paging_end(object);
11285 vm_object_unlock(object);
11303 vm_object_deallocate(msr->object);
11377 * object. Doesn't consume the port ref; produces a map ref,
11388 vm_object_t object;
11413 object = named_entry->backing.object;
11418 vm_object_reference(named_entry->backing.object);
11429 return object;
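
The closing fragment (lines 11377-11429) converts a named-entry port into its backing VM object: the port reference is not consumed, and the object comes back with an extra reference taken at line 11418 that the caller must drop later. A toy sketch of that reference convention, with illustrative types:

    struct toy_refobj {
        int ref_count;
    };

    struct toy_named_entry {
        struct toy_refobj *backing_object;
    };

    struct toy_refobj *
    toy_named_entry_to_object(struct toy_named_entry *ne)
    {
        struct toy_refobj *object = ne->backing_object;

        if (object != NULL)
            object->ref_count++;    /* vm_object_reference() */
        return object;              /* the caller's port reference is untouched */
    }
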