Lines matching refs:object in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

298 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync)    \
304 if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \
305 vm_object_paging_begin(object); \
306 vm_object_unlock(object); \
322 vm_object_lock(object); \
323 vm_object_paging_end(object); \
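The fragments above (298-323) show the heart of LIST_REQ_PAGEOUT_PAGES: take a paging reference, drop the object lock across the pager upcall, then re-lock and release the reference. Below is a minimal user-space sketch of that bracketing discipline; the struct, the pthread lock, and the function bodies are stand-ins for the kernel's, not the real XNU definitions.

    #include <assert.h>
    #include <pthread.h>

    /* Hypothetical stand-in for the kernel's vm_object: a lock plus a
     * count of in-flight paging operations. */
    typedef struct vm_object {
        pthread_mutex_t lock;
        int             paging_in_progress;
    } *vm_object_t;

    static void vm_object_lock(vm_object_t o)   { pthread_mutex_lock(&o->lock); }
    static void vm_object_unlock(vm_object_t o) { pthread_mutex_unlock(&o->lock); }

    /* Caller holds the object lock for both of these, as in the macro. */
    static void vm_object_paging_begin(vm_object_t o) { o->paging_in_progress++; }
    static void vm_object_paging_end(vm_object_t o)
    {
        assert(o->paging_in_progress > 0);
        o->paging_in_progress--;
    }

    /* The macro's shape: pin the object with a paging reference, drop
     * the lock across the blocking pager upcall, then re-lock and unpin. */
    static void list_req_pageout_sketch(vm_object_t object)
    {
        vm_object_lock(object);
        vm_object_paging_begin(object);  /* keeps the object from terminating */
        vm_object_unlock(object);        /* never hold the lock across I/O */

        /* ... memory_object_data_return() upcall to the pager here ... */

        vm_object_lock(object);
        vm_object_paging_end(object);
        vm_object_unlock(object);
    }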
332 * memory object. For each page in the given range,
363 vm_object_t object;
376 object = memory_object_control_to_vm_object(control);
377 if (object == VM_OBJECT_NULL)
386 * Lock the object, and acquire a paging reference to
389 vm_object_lock(object);
390 vm_object_paging_begin(object);
391 offset -= object->paging_offset;
393 (void)vm_object_update(object,
396 vm_object_paging_end(object);
397 vm_object_unlock(object);
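Lines 376-397 above show the canonical entry sequence for memory_object_lock_request(): resolve the control to a VM object, take the lock and a paging reference, translate the pager-relative offset into object space, then call vm_object_update(). A hedged sketch of that sequence, with no-op stand-ins for the kernel calls:

    #include <stdint.h>

    typedef uint64_t memory_object_offset_t;
    typedef uint64_t memory_object_size_t;

    /* Stand-in object: just the field this sketch needs. */
    typedef struct vm_object { memory_object_offset_t paging_offset; } *vm_object_t;

    static void vm_object_lock(vm_object_t o)         { (void)o; }
    static void vm_object_unlock(vm_object_t o)       { (void)o; }
    static void vm_object_paging_begin(vm_object_t o) { (void)o; }
    static void vm_object_paging_end(vm_object_t o)   { (void)o; }
    static int  vm_object_update(vm_object_t o, memory_object_offset_t off,
                                 memory_object_size_t size)
    { (void)o; (void)off; (void)size; return 0; }

    /* Shape of lines 386-397: the pager names data by its own offsets,
     * so translate into object space before operating on pages. */
    int lock_request_sketch(vm_object_t object,
                            memory_object_offset_t offset,   /* pager space */
                            memory_object_size_t size)
    {
        vm_object_lock(object);
        vm_object_paging_begin(object);

        offset -= object->paging_offset;     /* pager space -> object space */
        (void)vm_object_update(object, offset, size);

        vm_object_paging_end(object);
        vm_object_unlock(object);
        return 0;                            /* KERN_SUCCESS in the kernel */
    }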
424 vm_object_t object;
426 object = memory_object_control_to_vm_object(control);
427 if (object == VM_OBJECT_NULL)
430 return vm_object_release_name(object, flags);
438 * Shut down a memory object, despite the
447 vm_object_t object;
449 object = memory_object_control_to_vm_object(control);
450 if (object == VM_OBJECT_NULL)
453 return (vm_object_destroy(object, reason));
460 * range within an object to its memory manager. Much the
482 vm_object_t object,
493 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
494 (integer_t)object, offset, size, should_flush, should_return);
497 * Lock the object, and acquire a paging reference to
501 vm_object_lock(object);
502 vm_object_paging_begin(object);
512 rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
520 vm_object_paging_end(object);
521 vm_object_unlock(object);
530 vm_object_t object,
551 offset < offset_end && object->resident_page_count;
558 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
563 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
568 (integer_t)object, offset, page_lock_result, 0, 0);
577 LIST_REQ_PAGEOUT_PAGES(object,
591 LIST_REQ_PAGEOUT_PAGES(object,
597 PAGE_SLEEP(object, m, THREAD_UNINT);
609 * object if we issue the LIST_REQ_PAGEOUT
614 LIST_REQ_PAGEOUT_PAGES(object,
622 PAGE_SLEEP(object, m, THREAD_UNINT);
663 LIST_REQ_PAGEOUT_PAGES(object,
676 * Called with object locked and paging ref taken.
680 register vm_object_t object,
718 * XXX vm_map_copy interface. Need to understand object
730 while ((copy_object = object->copy) != VM_OBJECT_NULL) {
734 * to hold the object stable until we gain control of the
735 * copy object so we have to be careful how we approach this
739 * we 'won' the lock on the copy object...
740 * no need to hold the object lock any longer...
741 * take a real reference on the copy object because
745 * will keep the copy object from going away if that happens
747 vm_object_unlock(object);
751 vm_object_unlock(object);
756 vm_object_lock(object);
801 copy_object = object;
834 page->object, top_page);
868 if (object != copy_object)
870 vm_object_lock(object);
878 if (copy_object != VM_OBJECT_NULL && copy_object != object) {
881 vm_object_lock(object);
885 if (copy_object != VM_OBJECT_NULL && copy_object != object) {
891 * delete the ref the COW was holding on the target object
893 vm_object_deallocate(object);
897 vm_object_lock(object);
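The comments at 730-756 describe the delicate part of vm_object_update(): the object lock cannot simply be held while acquiring the copy object's lock, so once the copy object's lock is won, a real reference is taken on it before the object lock is dropped. A simplified sketch of that idiom; all helpers here are stand-ins, and the retry and teardown paths are reduced to their shape:

    #include <stddef.h>

    typedef struct vm_object {
        struct vm_object *copy;        /* the copy (shadow) object, if any */
    } *vm_object_t;

    #define VM_OBJECT_NULL ((vm_object_t)NULL)

    static void vm_object_lock(vm_object_t o)             { (void)o; }
    static void vm_object_unlock(vm_object_t o)           { (void)o; }
    static int  vm_object_lock_try(vm_object_t o)         { (void)o; return 1; }
    static void vm_object_reference_locked(vm_object_t o) { (void)o; }
    static void vm_object_deallocate(vm_object_t o)       { (void)o; }
    static void mutex_pause_stub(void)                    { }

    static void update_copy_object_sketch(vm_object_t object)
    {
        vm_object_t copy_object;

        while ((copy_object = object->copy) != VM_OBJECT_NULL) {
            if (!vm_object_lock_try(copy_object)) {
                /* Lost the race: back off and retry from the top. */
                vm_object_unlock(object);
                mutex_pause_stub();
                vm_object_lock(object);
                continue;
            }
            /* We 'won' the lock on the copy object: pin it with a real
             * reference so it can't go away once the object lock drops. */
            vm_object_reference_locked(copy_object);
            vm_object_unlock(object);

            /* ... push modified pages into copy_object ... */

            vm_object_unlock(copy_object);
            vm_object_deallocate(copy_object);   /* drop our pin */
            vm_object_lock(object);
            break;
        }
    }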
905 * however, the object lock will get dropped while processing
925 if ((object->resident_page_count < RESIDENT_LIMIT) &&
926 (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
938 m = (vm_page_t) queue_first(&object->memq);
940 while (!queue_end(&object->memq, (queue_entry_t) m)) {
1020 if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
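Lines 925-1020 show the fast path of vm_object_update(): when the resident page count is small relative to the request (the RESIDENT_LIMIT test at 925-926), the code walks the object's memq once, coalescing resident pages into at most MAX_EXTENTS ranges, then processes each extent. The sketch below models that gathering pass with a plain linked list and a deliberately simplified merge policy; the field names and the fragmentation fallback are assumptions, not the kernel's exact logic.

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE   4096u
    #define MAX_EXTENTS 4          /* the kernel's array is likewise fixed-size */

    typedef uint64_t vm_object_offset_t;

    /* Stand-in for a resident page on the object's memq. */
    typedef struct vm_page {
        struct vm_page     *next;
        vm_object_offset_t  offset;
    } vm_page_t;

    struct extent { vm_object_offset_t e_min, e_max; };   /* e_max exclusive */

    /* One pass over the resident pages, keeping only those inside
     * [start, end) and coalescing adjacent pages into extents.  Returns
     * the extent count, or -1 when the pages are too scattered and the
     * caller should fall back to scanning the whole range. */
    static int collect_extents(vm_page_t *memq,
                               vm_object_offset_t start, vm_object_offset_t end,
                               struct extent ext[MAX_EXTENTS])
    {
        int n = 0;

        for (vm_page_t *m = memq; m != NULL; m = m->next) {
            if (m->offset < start || m->offset >= end)
                continue;                            /* outside the request */

            int i;
            for (i = 0; i < n; i++) {
                if (m->offset + PAGE_SIZE >= ext[i].e_min &&
                    m->offset <= ext[i].e_max) {
                    /* Overlapping or adjacent: grow this extent. */
                    if (m->offset < ext[i].e_min)
                        ext[i].e_min = m->offset;
                    if (m->offset + PAGE_SIZE > ext[i].e_max)
                        ext[i].e_max = m->offset + PAGE_SIZE;
                    break;
                }
            }
            if (i == n) {
                if (n == MAX_EXTENTS)
                    return -1;                       /* too fragmented */
                ext[n].e_min = m->offset;
                ext[n].e_max = m->offset + PAGE_SIZE;
                n++;
            }
        }
        return n;
    }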
1036 * memory object outstanding but they will not overlap.
1045 vm_object_t object;
1048 object = memory_object_control_to_vm_object(control);
1051 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
1052 (integer_t)object, offset, length, 0, 0);
1058 if (object == VM_OBJECT_NULL)
1061 vm_object_lock(object);
1066 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
1068 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
1073 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
1074 vm_object_unlock(object);
1079 vm_object_unlock(object);
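Per the comment at 1036, several msync requests can be outstanding against one memory object but their ranges never overlap, so memory_object_synchronize_completed() (1045-1079) can identify the finished request by (offset, length) alone: scan object->msr_q under the object lock, unlink the match, and wake its waiter. A sketch with a singly linked list standing in for the kernel's queue primitives:

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t memory_object_offset_t;
    typedef uint64_t memory_object_size_t;

    /* Stand-in for the kernel's msync_req; the real one carries its
     * own lock and lives on the object's msr_q. */
    typedef struct msync_req {
        struct msync_req        *next;
        memory_object_offset_t   offset;
        memory_object_size_t     length;
        int                      flag;        /* set when the sync is done */
    } *msync_req_t;

    typedef struct vm_object { msync_req_t msr_q; } *vm_object_t;

    static void vm_object_lock(vm_object_t o)   { (void)o; }
    static void vm_object_unlock(vm_object_t o) { (void)o; }
    static void thread_wakeup_stub(void *event) { (void)event; }

    /* Because outstanding ranges never overlap, (offset, length)
     * names exactly one request; unlink it and wake its sleeper. */
    void synchronize_completed_sketch(vm_object_t object,
                                      memory_object_offset_t offset,
                                      memory_object_size_t length)
    {
        vm_object_lock(object);
        for (msync_req_t *pp = &object->msr_q; *pp != NULL; pp = &(*pp)->next) {
            msync_req_t msr = *pp;
            if (msr->offset == offset && msr->length == length) {
                *pp = msr->next;             /* queue_remove() in the kernel */
                msr->flag = 1;
                vm_object_unlock(object);
                thread_wakeup_stub(msr);     /* wake the msync waiter */
                return;
            }
        }
        vm_object_unlock(object);            /* no match: request already gone */
    }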
1089 vm_object_t object,
1099 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
1100 (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
1102 if (object == VM_OBJECT_NULL)
1127 vm_object_lock(object);
1132 assert(!object->internal);
1133 object_became_ready = !object->pager_ready;
1134 object->copy_strategy = copy_strategy;
1135 object->can_persist = may_cache;
1136 object->temporary = temporary;
1137 object->silent_overwrite = silent_overwrite;
1138 object->advisory_pageout = advisory_pageout;
1146 object->pager_ready = TRUE;
1147 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1150 vm_object_unlock(object);
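Lines 1127-1150 store the new attributes under the object lock and treat the pager-ready transition as edge-triggered: pager_ready is only ever set here, and waiters on VM_OBJECT_EVENT_PAGER_READY are woken exactly when it first becomes true. A sketch of that shape, with stand-in fields and helpers:

    #include <stdbool.h>

    /* Stand-in object carrying just the attribute fields from 1133-1147. */
    typedef struct vm_object {
        bool pager_ready;
        bool can_persist;
        bool temporary;
        int  copy_strategy;
    } *vm_object_t;

    static void vm_object_lock(vm_object_t o)        { (void)o; }
    static void vm_object_unlock(vm_object_t o)      { (void)o; }
    static void vm_object_wakeup_stub(vm_object_t o) { (void)o; }

    /* Every store happens under the object lock, and the PAGER_READY
     * wakeup fires only on the not-ready to ready edge. */
    void set_attributes_sketch(vm_object_t object,
                               bool may_cache, bool temporary, int copy_strategy)
    {
        vm_object_lock(object);

        bool object_became_ready = !object->pager_ready;

        object->copy_strategy = copy_strategy;
        object->can_persist   = may_cache;
        object->temporary     = temporary;

        if (object_became_ready) {
            object->pager_ready = true;
            /* Wake threads blocked in vm_object_wait(...,
             * VM_OBJECT_EVENT_PAGER_READY, ...). */
            vm_object_wakeup_stub(object);
        }
        vm_object_unlock(object);
    }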
1156 * Set the memory object attribute as provided.
1170 vm_object_t object;
1179 object = memory_object_control_to_vm_object(control);
1180 if (object == VM_OBJECT_NULL)
1183 vm_object_lock(object);
1185 temporary = object->temporary;
1186 may_cache = object->can_persist;
1187 copy_strategy = object->copy_strategy;
1188 silent_overwrite = object->silent_overwrite;
1189 advisory_pageout = object->advisory_pageout;
1191 invalidate = object->invalidate;
1193 vm_object_unlock(object);
1303 return (vm_object_set_attributes_common(object,
1319 vm_object_t object;
1321 object = memory_object_control_to_vm_object(control);
1322 if (object == VM_OBJECT_NULL)
1325 vm_object_lock(object);
1338 behave->copy_strategy = object->copy_strategy;
1339 behave->temporary = object->temporary;
1341 behave->invalidate = object->invalidate;
1360 behave->copy_strategy = object->copy_strategy;
1361 behave->temporary = object->temporary;
1363 behave->invalidate = object->invalidate;
1367 behave->advisory_pageout = object->advisory_pageout;
1368 behave->silent_overwrite = object->silent_overwrite;
1384 perf->may_cache = object->can_persist;
1400 attr->may_cache = object->can_persist;
1401 attr->copy_strategy = object->copy_strategy;
1417 attr->copy_strategy = object->copy_strategy;
1419 attr->may_cache_object = object->can_persist;
1420 attr->temporary = object->temporary;
1431 vm_object_unlock(object);
1447 vm_object_t object;
1487 /* offset from beginning of named entry offset in object */
1496 object = vm_object_enter(named_entry->backing.pager,
1501 if (object == VM_OBJECT_NULL) {
1509 vm_object_lock(object);
1510 vm_object_reference_locked(object);
1511 named_entry->backing.object = object;
1515 /* wait for object to be ready */
1516 while (!object->pager_ready) {
1517 vm_object_wait(object,
1520 vm_object_lock(object);
1522 vm_object_unlock(object);
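Lines 1515-1522 are a classic monitor-style wait: the object cannot be mapped until its pager is ready, and because vm_object_wait() drops the object lock while sleeping, the predicate must be re-checked after every wakeup. A condition-variable sketch of the same loop, with pthread types standing in for the kernel's event and wait machinery:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct vm_object {
        pthread_mutex_t lock;
        pthread_cond_t  pager_ready_cv;
        bool            pager_ready;
    } *vm_object_t;

    /* Shape of the loop at 1516-1520: sleep releases the lock, so
     * the condition must be tested again once we are relocked. */
    void wait_for_pager_ready_sketch(vm_object_t object)
    {
        pthread_mutex_lock(&object->lock);
        while (!object->pager_ready) {
            /* vm_object_wait(object, VM_OBJECT_EVENT_PAGER_READY, ...)
             * in the real code; it sleeps with the lock released. */
            pthread_cond_wait(&object->pager_ready_cv, &object->lock);
        }
        pthread_mutex_unlock(&object->lock);
    }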
1525 /* an already mapped object. If the object is */
1527 /* object cannot be mapped until it is ready */
1530 object = named_entry->backing.object;
1531 vm_object_reference(object);
1539 object = memory_object_control_to_vm_object(control);
1540 if (object == VM_OBJECT_NULL)
1542 vm_object_reference(object);
1546 if (object == VM_OBJECT_NULL)
1549 if (!object->private) {
1552 if (object->phys_contiguous) {
1561 ret = vm_object_iopl_request(object,
1568 vm_object_deallocate(object);
1591 vm_object_t object;
1593 object = memory_object_control_to_vm_object(control);
1594 if (object == VM_OBJECT_NULL)
1597 return vm_object_upl_request(object,
1628 vm_object_t object;
1630 object = memory_object_control_to_vm_object(control);
1631 if (object == VM_OBJECT_NULL)
1634 return vm_object_super_upl_request(object,
1648 vm_object_t object;
1650 object = memory_object_control_to_vm_object(control);
1652 if (object == VM_OBJECT_NULL || object->paging_offset > *start)
1655 *start -= object->paging_offset;
1657 vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info);
1659 *start += object->paging_offset;
1816 /* the UPL regimen but takes place on the object rather than on a UPL */
1826 vm_object_t object;
1828 object = memory_object_control_to_vm_object(control);
1829 if (object == VM_OBJECT_NULL)
1832 return vm_object_page_op(object, offset, ops, phys_entry, flags);
1853 vm_object_t object;
1855 object = memory_object_control_to_vm_object(control);
1856 if (object == VM_OBJECT_NULL)
1859 return vm_object_range_op(object,
1872 vm_object_t object;
1876 object = memory_object_control_to_vm_object(control);
1877 if (object == VM_OBJECT_NULL)
1880 if (object->resident_page_count)
1891 vm_object_t object;
1893 object = memory_object_control_to_vm_object(control);
1894 if (object == VM_OBJECT_NULL)
1897 vm_object_lock(object);
1898 object->code_signed = is_signed;
1899 vm_object_unlock(object);
1918 vm_object_t object)
1924 control->moc_object = object;
1933 vm_object_t object)
1936 (control->moc_object != object));
1937 control->moc_object = object;
2017 __unused memory_object_t object)
2155 * each time a "named" VM object gets mapped directly or indirectly
2156 * (copy-on-write mapping). A "named" VM object has an extra reference held
2158 * memory object (and its VM object) can be reclaimed.
2160 * the mappings of that memory object have been removed.
2162 * For a given VM object, calls to memory_object_map() and memory_object_unmap()
2163 * are serialized (through object->mapping_in_progress), to ensure that the
2164 * pager gets a consistent view of the mapping status of the memory object.
2166 * This allows the pager to keep track of how many times a memory object
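The comment block above describes the map/unmap accounting for named VM objects and notes that memory_object_map() and memory_object_unmap() are serialized through object->mapping_in_progress. Below is a sketch of that serialization, under the stated assumption that the flag is manipulated only with the object lock held and the pager notification happens unlocked:

    #include <pthread.h>
    #include <stdbool.h>

    /* Stand-in object modeling the fields the comment describes. */
    typedef struct vm_object {
        pthread_mutex_t lock;
        pthread_cond_t  mapping_cv;
        bool            mapping_in_progress;
    } *vm_object_t;

    static void pager_map_notify_stub(vm_object_t o) { (void)o; }

    /* Shape of memory_object_map(): claim the in-progress flag under
     * the object lock, drop the lock for the pager notification, then
     * clear the flag and wake any serialized map/unmap caller, so the
     * pager always sees a consistent mapping status. */
    void memory_object_map_sketch(vm_object_t object)
    {
        pthread_mutex_lock(&object->lock);
        while (object->mapping_in_progress)
            pthread_cond_wait(&object->mapping_cv, &object->lock);
        object->mapping_in_progress = true;
        pthread_mutex_unlock(&object->lock);

        /* The upcall runs unlocked; the flag keeps a concurrent
         * memory_object_unmap() from interleaving with it. */
        pager_map_notify_stub(object);

        pthread_mutex_lock(&object->lock);
        object->mapping_in_progress = false;
        pthread_cond_broadcast(&object->mapping_cv);
        pthread_mutex_unlock(&object->lock);
    }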