Lines Matching refs:object (only in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/)

121  *	(virtual memory object, offset) to page lookup, employs
198 * object structure, be sure to add initialization
369 * for the object/offset-to-page hash table headers.
390 m->object = VM_OBJECT_NULL; /* reset later */
509 * Calculate object shift value for hashing algorithm:
512 * hash shifts the object left by
864 * Distributes the object/offset key pair among hash buckets.
868 #define vm_page_hash(object, offset) (\
869 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
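
The vm_page_hash() macro at lines 868-869 spreads an (object, offset) pair across the hash buckets by multiplying the object pointer with vm_page_bucket_hash and XORing that constant with the page number of the offset (atop_64). Below is a minimal standalone sketch of the same shape, assuming the result is then masked down to a power-of-two bucket count (the reduction step is not visible in this excerpt); the multiplier, page shift, and bucket count are placeholders, not the values the kernel computes at boot.

    #include <stdint.h>

    /* Placeholder constants for the sketch only; the kernel derives
     * vm_page_bucket_hash and the bucket count at startup. */
    #define SKETCH_BUCKETS     1024u        /* power of two */
    #define SKETCH_HASH_MULT   0x9e3779b1u  /* arbitrary odd multiplier */
    #define SKETCH_PAGE_SHIFT  12           /* 4 KB pages */

    /* Same shape as vm_page_hash(): mix the object pointer with the page
     * number of the offset, then mask down to a bucket index. */
    static inline uint32_t
    sketch_page_hash(const void *object, uint64_t offset)
    {
        uint32_t obj  = (uint32_t)(uintptr_t)object;
        uint32_t pgno = (uint32_t)(offset >> SKETCH_PAGE_SHIFT);  /* like atop_64() */

        return ((obj * SKETCH_HASH_MULT) + (pgno ^ SKETCH_HASH_MULT))
               & (SKETCH_BUCKETS - 1);
    }
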
876 * Inserts the given mem entry into the object/object-page
877 * table and object list.
879 * The object must be locked.
884 vm_object_t object,
887 vm_page_insert_internal(mem, object, offset, FALSE);
894 vm_object_t object,
901 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
902 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
906 if (object == vm_submap_object) {
911 vm_object_lock_assert_exclusive(object);
913 if (mem->tabled || mem->object != VM_OBJECT_NULL)
916 mem, object, offset, mem->object, mem->offset);
918 assert(!object->internal || offset < object->size);
922 assert(object->pageout == mem->pageout);
924 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
927 * Record the object/offset pair in this page
930 mem->object = object;
937 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
948 * Now link into the object's list of backed pages.
951 VM_PAGE_INSERT(mem, object);
955 * Show that the object has one more resident page.
958 object->resident_page_count++;
960 if (object->purgable == VM_PURGABLE_VOLATILE) {
968 } else if (object->purgable == VM_PURGABLE_EMPTY &&
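
The matches from lines 876 through 968 outline vm_page_insert_internal(): with the object locked exclusively, record the object/offset pair in the page, chain the page into its object/offset hash bucket, link it onto the object's list of backed pages, and bump resident_page_count (the purgeable-object token accounting at lines 960-968 is omitted here). A compressed sketch of that sequence follows, using simplified stand-in types rather than the kernel's vm_object/vm_page, and a private copy of the hash sketched above.

    #include <stdint.h>

    #define NBUCKETS 1024u

    /* Simplified stand-ins for vm_object and vm_page; field names echo the
     * kernel's (memq, listq, tabled) but the types are illustrative only. */
    struct vpage;
    struct vobject {
        struct vpage *memq;          /* object's list of resident pages */
        struct vpage *memq_hint;     /* last page found (see lookup sketch) */
        unsigned      resident_page_count;
    };
    struct vpage {
        struct vobject *object;
        uint64_t        offset;
        struct vpage   *listq;       /* next page on the object's memq */
        struct vpage   *next;        /* next page on the hash-bucket chain */
        int             tabled;
    };

    static struct vpage *buckets[NBUCKETS];  /* stand-in for vm_page_buckets[] */

    static uint32_t
    bucket_index(const struct vobject *object, uint64_t offset)
    {
        /* Same mixing as the vm_page_hash() sketch above. */
        return (((uint32_t)(uintptr_t)object * 0x9e3779b1u) +
                ((uint32_t)(offset >> 12) ^ 0x9e3779b1u)) & (NBUCKETS - 1);
    }

    /* Sketch of the vm_page_insert_internal() sequence; the caller is
     * assumed to hold the object lock exclusively, as the kernel asserts. */
    static void
    page_insert(struct vpage *mem, struct vobject *object, uint64_t offset)
    {
        /* Record the object/offset pair in this page. */
        mem->object = object;
        mem->offset = offset;

        /* Insert into the object/offset hash table. */
        struct vpage **bucket = &buckets[bucket_index(object, offset)];
        mem->next   = *bucket;
        *bucket     = mem;
        mem->tabled = 1;

        /* Link into the object's list of backed pages and show that the
         * object has one more resident page. */
        mem->listq   = object->memq;
        object->memq = mem;
        object->resident_page_count++;
    }
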
982 * remove any existing page at the given offset in object.
984 * The object and page queues must be locked.
990 register vm_object_t object,
997 vm_object_lock_assert_exclusive(object);
1001 if (mem->tabled || mem->object != VM_OBJECT_NULL)
1004 mem, object, offset, mem->object, mem->offset);
1007 * Record the object/offset pair in this page
1010 mem->object = object;
1018 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1026 if (m->object == object && m->offset == offset) {
1052 * offset for this object... remove it from
1053 * the object and free it back to the free list
1058 found_m->object = VM_OBJECT_NULL;
1060 object->resident_page_count--;
1062 if (object->purgable == VM_PURGABLE_VOLATILE) {
1074 * Now link into the object's list of backed pages.
1077 VM_PAGE_INSERT(mem, object);
1081 * And show that the object has one more resident
1085 object->resident_page_count++;
1087 if (object->purgable == VM_PURGABLE_VOLATILE) {
1089 } else if (object->purgable == VM_PURGABLE_EMPTY) {
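
vm_page_replace() (lines 982-1089) performs the same insertion, but first scans the bucket chain for a page already tabled at this object/offset; any such page is unlinked from the hash table and the object, dissociated (found_m->object = VM_OBJECT_NULL), and freed back to the free list before the new page is entered. The sketch below continues the types and helpers above; it returns the displaced page to the caller instead of freeing it, and omits the page-queues locking the kernel requires.

    /* Unlink a page from its object's list of backed pages (helper for the
     * replace and remove sketches; the kernel uses its queue macros here). */
    static void
    memq_unlink(struct vobject *object, struct vpage *m)
    {
        for (struct vpage **pp = &object->memq; *pp != NULL; pp = &(*pp)->listq) {
            if (*pp == m) {
                *pp = m->listq;
                return;
            }
        }
    }

    /* Sketch of vm_page_replace(): table `mem` at (object, offset),
     * evicting any page already there. */
    static struct vpage *
    page_replace(struct vpage *mem, struct vobject *object, uint64_t offset)
    {
        struct vpage **bucket = &buckets[bucket_index(object, offset)];
        struct vpage  *found  = NULL;

        /* Look for a page already tabled at this object/offset. */
        for (struct vpage **pp = bucket; *pp != NULL; pp = &(*pp)->next) {
            struct vpage *m = *pp;
            if (m->object == object && m->offset == offset) {
                *pp = m->next;              /* out of the hash chain    */
                memq_unlink(object, m);     /* out of the object's list */
                m->object = NULL;
                m->tabled = 0;
                object->resident_page_count--;
                found = m;                  /* the kernel frees this page */
                break;
            }
        }

        /* Now insert the replacement exactly as in page_insert(). */
        page_insert(mem, object, offset);
        return found;
    }
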
1099 * Removes the given mem entry from the object/offset-page
1100 * table and the object page list.
1102 * The object and page queues must be locked.
1113 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
1114 (integer_t)mem->object, (integer_t)mem->offset,
1119 vm_object_lock_assert_exclusive(mem->object);
1129 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
1150 * Now remove from the object's list of backed pages.
1156 * And show that the object has one fewer resident
1160 mem->object->resident_page_count--;
1162 if (mem->object->purgable == VM_PURGABLE_VOLATILE) {
1167 mem->object = VM_OBJECT_NULL;
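
vm_page_remove() (lines 1099-1167) is the inverse of the insert: with the object and page queues locked, the page is unhashed, unlinked from its object's list of backed pages, the object's resident_page_count drops by one, and mem->object is reset to VM_OBJECT_NULL. Continuing the same sketch (purgeable accounting and the memq_hint fix-up are left out):

    /* Sketch of vm_page_remove(): dissociate `mem` from its object. */
    static void
    page_remove(struct vpage *mem)
    {
        struct vobject *object = mem->object;

        /* Remove from the object/offset hash table. */
        for (struct vpage **pp = &buckets[bucket_index(object, mem->offset)];
             *pp != NULL; pp = &(*pp)->next) {
            if (*pp == mem) {
                *pp = mem->next;
                break;
            }
        }

        /* Remove from the object's list of backed pages, and show that the
         * object has one fewer resident page. */
        memq_unlink(object, mem);
        object->resident_page_count--;

        mem->tabled = 0;
        mem->object = NULL;
    }
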
1174 * Returns the page associated with the object/offset
1177 * The object must be locked. No side effects.
1190 register vm_object_t object,
1197 vm_object_lock_assert_held(object);
1198 mem = object->memq_hint;
1201 assert(mem->object == object);
1209 if (! queue_end(&object->memq, qe)) {
1213 assert(next_page->object == object);
1217 object->memq_hint = next_page; /* new hint */
1223 if (! queue_end(&object->memq, qe)) {
1227 assert(prev_page->object == object);
1231 object->memq_hint = prev_page; /* new hint */
1237 * Search the hash table for this object/offset pair
1239 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1242 * since we hold the object lock, we are guaranteed that no
1243 * new pages can be inserted into this object... this in turn
1258 if ((mem->object == object) && (mem->offset == offset))
1264 if (object->memq_hint != VM_PAGE_NULL) {
1267 assert(mem->object == object);
1268 object->memq_hint = mem;
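
vm_page_lookup() (lines 1174-1268) first consults the object's memq_hint (the real routine also probes the hint's neighbours on the object's queue) and only then searches the hash bucket; because the caller holds the object lock, no new pages can be inserted into the object during the scan. A reduced sketch with the same illustrative types:

    /* Sketch of vm_page_lookup(): hint first, then the hash bucket. */
    static struct vpage *
    page_lookup(struct vobject *object, uint64_t offset)
    {
        struct vpage *mem = object->memq_hint;

        /* Fast path: repeated lookups often hit the same page. */
        if (mem != NULL && mem->offset == offset)
            return mem;

        /* Slow path: search the hash table for this object/offset pair. */
        for (mem = buckets[bucket_index(object, offset)];
             mem != NULL; mem = mem->next) {
            if (mem->object == object && mem->offset == offset) {
                object->memq_hint = mem;     /* remember the hit */
                return mem;
            }
        }
        return NULL;
    }
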
1280 * current object to the specified target object/offset.
1282 * The object must be locked.
1291 assert(mem->object != new_object);
1295 * The encryption key is based on the page's memory object
1297 * another VM object changes its "pager" and "paging_offset"
1300 * One exception is VM object collapsing, where we transfer pages
1301 * from one backing object to its parent object. This operation also
1311 * Changes to mem->object require the page lock because
1312 * the pageout daemon uses that lock to get the object.
1316 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
1616 assert(mem->object == VM_OBJECT_NULL);
1724 assert(mem->object == VM_OBJECT_NULL);
1823 assert(mem->object == VM_OBJECT_NULL);
1962 * with this VM object/offset pair.
1969 vm_object_t object,
1974 vm_object_lock_assert_exclusive(object);
1979 vm_page_insert(mem, object, offset);
1986 vm_object_t object,
1991 vm_object_lock_assert_exclusive(object);
1996 vm_page_insert(mem, object, offset);
2007 * the object and returned to the caller.
2012 vm_object_t object,
2017 vm_object_lock_assert_exclusive(object);
2022 vm_page_insert(mem, object, offset);
2035 * disassociating it with any VM object.
2052 if (mem->object)
2053 vm_object_lock_assert_exclusive(mem->object);
2065 * the page from its VM object, so that we can find out on
2073 vm_page_remove(mem); /* clears tabled, object, offset */
2160 if (mem->tabled || mem->object)
2290 * The page's object and the page queues must be locked.
2297 // dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
2301 if (mem->object)
2302 vm_object_lock_assert_exclusive(mem->object);
2380 * The page's object and the page queues must be locked.
2387 // dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
2392 if (mem->object)
2393 vm_object_lock_assert_exclusive(mem->object);
2400 assert(mem->object != kernel_object);
2402 if (mem->object->purgable == VM_PURGABLE_EMPTY) {
2445 assert(m->object != kernel_object);
2488 m->dirty && m->object->internal &&
2489 (m->object->purgable == VM_PURGABLE_DENY ||
2490 m->object->purgable == VM_PURGABLE_NONVOLATILE ||
2491 m->object->purgable == VM_PURGABLE_VOLATILE )) {
2497 (!m->fictitious && m->object->named && m->object->ref_count == 1)) {
2532 assert(m->object != kernel_object);
2564 !m->fictitious && m->dirty && m->object->internal &&
2565 (m->object->purgable == VM_PURGABLE_DENY ||
2566 m->object->purgable == VM_PURGABLE_NONVOLATILE ||
2567 m->object->purgable == VM_PURGABLE_VOLATILE )) {
2598 assert(m->object != kernel_object);
2655 m->object->pages_created++;
2706 assert(m->object != kernel_object);
2784 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
2785 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
2833 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
2834 (integer_t)src_m->object, src_m->offset,
2835 (integer_t)dest_m->object, dest_m->offset,
2852 if (src_m->object != VM_OBJECT_NULL &&
2853 src_m->object->code_signed) {
2855 * We're copying a page from a code-signed object.
2990 * must be removed from the object they are tabled on... this requires taking the
2991 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
3105 * tabled on an object...
3251 vm_object_t object;
3256 object = m1->object;
3258 if (object != locked_object) {
3263 if (vm_object_lock_try(object))
3264 locked_object = object;
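
The matches around lines 2990-2991 and 3251-3264 describe the locking discipline used when stealing tabled pages for a contiguous allocation: removing a page needs its object's lock, but other locks are already held, so the object lock is only taken with vm_object_lock_try() and pages whose object cannot be locked without blocking are skipped rather than risking a lock-order deadlock. A sketch of that pattern using POSIX mutexes; sketch_object and lock_candidate_object are illustrative names, not kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative object with a lock; the kernel uses vm_object_lock_try(). */
    struct sketch_object {
        pthread_mutex_t lock;
    };

    /* Try to hold the candidate page's object lock, keeping at most one
     * object locked at a time.  Returns false when the lock is contended,
     * in which case the caller skips the page instead of blocking. */
    static bool
    lock_candidate_object(struct sketch_object *object,
                          struct sketch_object **locked_object)
    {
        if (object == *locked_object)
            return true;                      /* already holding it */

        if (*locked_object != NULL) {         /* drop the previous object */
            pthread_mutex_unlock(&(*locked_object)->lock);
            *locked_object = NULL;
        }

        if (pthread_mutex_trylock(&object->lock) == 0) {
            *locked_object = object;          /* safe to remove its pages */
            return true;
        }
        return false;                         /* contended: skip this page */
    }
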
3327 * now put the substitute page on the object
3567 iprintf("object=0x%x", p->object);