Lines Matching defs:object

119 	vm_object_t object;
170 vm_object_pip_wakeup(fs->object);
171 VM_OBJECT_WUNLOCK(fs->object);
172 if (fs->object != fs->first_object) {
197 VM_OBJECT_ASSERT_LOCKED(m->object);
204 vm_object_set_writeable_dirty(m->object);
355 * Find the backing store object and offset into it to begin the
397 * Try to avoid lock contention on the top-level object through
400 * level object and (2) not having to mark that object as containing
402 * object suffices, allowing multiple page faults of a similar type to
403 * run in parallel on the same top-level object.
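
The comment fragments above (source lines 397-403) describe the lock-contention optimization: when the fault maps an existing page from the top-level object and nothing in that object needs to be dirtied, a read lock suffices, so similar faults can proceed in parallel. A minimal user-space sketch of that pattern, using a POSIX rwlock as a stand-in for the object lock; the struct and names here are illustrative, not the kernel's:

    #include <pthread.h>
    #include <stddef.h>

    #define NPAGES 64

    struct toy_object {
            pthread_rwlock_t lock;          /* models the VM object lock */
            void *pages[NPAGES];            /* models the resident-page tree */
    };

    /*
     * Fast path: a shared lock is enough when the page is already
     * resident and no object state has to change, so many faults on
     * the same top-level object can run concurrently.
     */
    static void *
    fault_soft_fast(struct toy_object *top, size_t pindex)
    {
            void *m;

            pthread_rwlock_rdlock(&top->lock);
            m = pindex < NPAGES ? top->pages[pindex] : NULL;
            pthread_rwlock_unlock(&top->lock);
            return (m);     /* NULL: caller retries on the write-locked path */
    }
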
431 * Make a reference to this object to prevent its disposal while we
448 * Search for the page at object/offset.
450 fs.object = fs.first_object;
454 * If the object is marked for imminent termination,
457 * object, return fail.
459 if ((fs.object->flags & OBJ_DEAD) != 0) {
460 dead = fs.object->type == OBJT_DEAD;
471 fs.m = vm_page_lookup(fs.object, fs.pindex);
496 if (fs.object != fs.first_object) {
499 VM_OBJECT_WUNLOCK(fs.object);
501 VM_OBJECT_WLOCK(fs.object);
511 if (fs.m == vm_page_lookup(fs.object,
515 vm_object_pip_wakeup(fs.object);
516 VM_OBJECT_WUNLOCK(fs.object);
542 if (fs.object->type != OBJT_DEFAULT ||
543 fs.object == fs.first_object) {
544 if (fs.pindex >= fs.object->size) {
550 * Allocate a new page for this object/offset pair.
560 if ((fs.object->flags & OBJ_COLORED) == 0) {
561 fs.object->flags |= OBJ_COLORED;
562 fs.object->pg_color = atop(vaddr) -
568 if (fs.object->type != OBJT_VNODE &&
569 fs.object->backing_object == NULL)
571 fs.m = vm_page_alloc(fs.object, fs.pindex,
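
Source lines 560-562 stamp an uncolored object with a pg_color derived from the faulting address, so that subsequent physical-page allocations line up with the virtual mapping (useful for cache coloring and superpage formation). A sketch of just that arithmetic, assuming 4 KB pages and hypothetical type names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT      12                      /* assume 4 KB pages */
    #define atop(x)         ((uintptr_t)(x) >> PAGE_SHIFT)

    struct color_object {
            bool colored;           /* models the OBJ_COLORED flag */
            uintptr_t pg_color;
    };

    /*
     * Record, once per object, the bias between the faulting virtual
     * page and its index within the object; allocating physical memory
     * with this bias keeps virtually contiguous pages well-placed.
     */
    static void
    set_color(struct color_object *obj, uintptr_t vaddr, size_t pindex)
    {
            if (!obj->colored) {
                    obj->colored = true;
                    obj->pg_color = atop(vaddr) - pindex;
            }
    }
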
593 if (fs.object->type != OBJT_DEFAULT) {
640 * fs.object and the pages are exclusive busied.
644 if (fs.object->type == OBJT_VNODE &&
645 (vp = fs.object->handle) != fs.vp) {
669 ("vm_fault: vnode-backed object mapped by system map"));
675 * object as the page for this fault. If they do,
676 * then they are also faulted into the object. The
689 vm_pager_get_pages(fs.object, marray, faultcount,
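
Source lines 675-689 cover clustered pagein: neighbouring pages that reside in the same object are gathered into an array (marray) and filled by a single pager request. A sketch of building such a window around the faulting index; the window sizes are illustrative, not the kernel's defaults, the caller's idx[] must hold FAULT_BEHIND + FAULT_AHEAD + 1 entries, and pindex < objsize is assumed:

    #include <stddef.h>

    #define FAULT_BEHIND    8       /* illustrative, not the kernel's value */
    #define FAULT_AHEAD     8

    /*
     * Build the contiguous index window handed to the pager: every
     * slot comes from the same object, so one vm_pager_get_pages()-
     * style call can fill all of them; *reqslot marks the faulting
     * page's position within the window.
     */
    static size_t
    build_window(size_t pindex, size_t objsize, size_t idx[], size_t *reqslot)
    {
            size_t first, last, n;

            first = pindex >= FAULT_BEHIND ? pindex - FAULT_BEHIND : 0;
            last = pindex + FAULT_AHEAD < objsize ? pindex + FAULT_AHEAD :
                objsize - 1;
            for (n = 0; first + n <= last; n++)
                    idx[n] = first + n;
            *reqslot = pindex - first;
            return (n);             /* number of slots filled */
    }
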
703 fs.m = vm_page_lookup(fs.object, fs.pindex);
714 * object/offset); before doing so, we must get back
715 * our object lock to preserve our invariant.
720 * If this is the top-level object, we must leave the
722 * past us, and inserting the page in that object at
745 if (fs.object != fs.first_object) {
758 * We get here if the object has default pager (or unwiring)
761 if (fs.object == fs.first_object)
765 * Move on to the next object. Lock the next object before
768 fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
769 next_object = fs.object->backing_object;
772 * If there's no object left, fill the page in the top
773 * object with zeros.
775 if (fs.object != fs.first_object) {
776 vm_object_pip_wakeup(fs.object);
777 VM_OBJECT_WUNLOCK(fs.object);
779 fs.object = fs.first_object;
782 VM_OBJECT_WLOCK(fs.object);
800 KASSERT(fs.object != next_object,
801 ("object loop %p", next_object));
804 if (fs.object != fs.first_object)
805 vm_object_pip_wakeup(fs.object);
806 VM_OBJECT_WUNLOCK(fs.object);
807 fs.object = next_object;
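
Source lines 758-807 walk the shadow chain: the page index is biased by backing_object_offset at each hop, and if the chain runs out without a hit, the page is zero-filled in the top-level object (lines 772-782). A self-contained toy model of that walk, assuming every index stays below NPAGES and a 4 KB page:

    #include <stddef.h>
    #include <string.h>

    #define NPAGES  64
    #define PGSZ    4096

    struct chain_object {
            struct chain_object *backing_object;
            size_t backing_offset;  /* in pages, OFF_TO_IDX already applied */
            void *pages[NPAGES];
    };

    /*
     * Walk the shadow chain for a resident page; a miss at the bottom
     * of the chain means anonymous memory, so zero-fill the page in
     * the top-level object at the original index.
     */
    static void *
    chain_lookup(struct chain_object *first, size_t first_pindex,
        void *zeropage)
    {
            struct chain_object *obj = first;
            size_t pindex = first_pindex;

            for (;;) {
                    if (obj->pages[pindex] != NULL)
                            return (obj->pages[pindex]);
                    if (obj->backing_object == NULL)
                            break;
                    pindex += obj->backing_offset;  /* bias into backing object */
                    obj = obj->backing_object;
            }
            memset(zeropage, 0, PGSZ);
            first->pages[first_pindex] = zeropage;
            return (zeropage);
    }
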
814 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
820 * top-level object, we have to copy it into a new page owned by the
821 * top-level object.
823 if (fs.object != fs.first_object) {
831 * backing object has no other refs to it, and cannot
833 * move the page from the backing object to the
834 * first object. Note that we must mark the page
835 * dirty in the first object so that it will go out
841 * Only one shadow object
843 (fs.object->shadow_count == 1) &&
847 (fs.object->ref_count == 1) &&
849 * No one else can look this object up
851 (fs.object->handle == NULL) &&
853 * No other ways to look the object up
855 ((fs.object->type == OBJT_DEFAULT) ||
856 (fs.object->type == OBJT_SWAP)) &&
861 fs.object == fs.first_object->backing_object) {
870 * process's object. The page is
883 fs.object, OFF_TO_IDX(
911 * We no longer need the old page or object.
916 * fs.object != fs.first_object due to above
919 vm_object_pip_wakeup(fs.object);
920 VM_OBJECT_WUNLOCK(fs.object);
924 fs.object = fs.first_object;
928 VM_OBJECT_WLOCK(fs.object);
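
Source lines 823-928 handle copy-on-write: a page found below the top-level object must be copied up before it can be written, but when the conditions listed at lines 841-861 all hold, the page can simply be moved (renamed) into the front object, since nothing else can ever reach it again. A condensed sketch of that decision; the field names mirror the fragments above, everything else is illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    struct cow_object {
            int shadow_count;
            int ref_count;
            void *handle;                   /* non-NULL: externally nameable */
            enum { T_DEFAULT, T_SWAP, T_VNODE } type;
            struct cow_object *backing_object;
    };

    /*
     * True when the backing page may be moved into the front object
     * instead of copied: exactly one shadow, one reference, no
     * external name, anonymous type, and the object sits directly
     * behind the first object.
     */
    static bool
    can_rename(const struct cow_object *obj, const struct cow_object *first)
    {
            return (obj->shadow_count == 1 && obj->ref_count == 1 &&
                obj->handle == NULL &&
                (obj->type == T_DEFAULT || obj->type == T_SWAP) &&
                obj == first->backing_object);
    }
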
980 * Check whether the protection has changed or the object has
1012 VM_OBJECT_WUNLOCK(fs.object);
1025 VM_OBJECT_WLOCK(fs.object);
1059 * faulting pindex within the first object of the shadow chain.
1064 vm_object_t first_object, object;
1068 object = fs->object;
1069 VM_OBJECT_ASSERT_WLOCKED(object);
1071 if (first_object != object) {
1073 VM_OBJECT_WUNLOCK(object);
1075 VM_OBJECT_WLOCK(object);
1086 m = first_object != object ? fs->first_m : fs->m;
1106 if (first_object != object)
1170 lobject = entry->object.vm_object;
1176 0, ("vm_fault_prefault: unaligned object offset"));
1242 * The object lock is not held here.
1275 * Create new shadow object backing dst_entry with private copy of
1290 vm_object_t backing_object, dst_object, object, src_object;
1305 src_object = src_entry->object.vm_object;
1313 * Create the top-level object for the destination entry. (Doesn't
1325 KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1328 dst_entry->object.vm_object = dst_object;
1360 * range, copying each page from the source object to the
1361 * destination object. Since the source is wired, those pages
1363 * Since the destination object does not share any backing storage
1364 * with the source object, all of its pages must be dirtied,
1372 * Find the page in the source object, and copy it in.
1378 object = src_object;
1380 while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
1381 (backing_object = object->backing_object) != NULL) {
1385 * read-only, the first object in the shadow
1393 ("vm_fault_copy_entry: main object missing page"));
1396 pindex += OFF_TO_IDX(object->backing_object_offset);
1397 if (object != dst_object)
1398 VM_OBJECT_RUNLOCK(object);
1399 object = backing_object;
1403 if (object != dst_object) {
1405 * Allocate a page in the destination object.
1412 VM_OBJECT_RUNLOCK(object);
1418 VM_OBJECT_RUNLOCK(object);
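
Source lines 1360-1418 describe vm_fault_copy_entry's loop: each page is found somewhere in the source shadow chain, a fresh page is allocated in the destination object, and the data is copied and dirtied, since the destination has no backing store to recover it from. A toy sketch of the per-page step, with memcpy standing in for the physical-page copy:

    #include <stdbool.h>
    #include <string.h>

    #define PGSZ 4096

    struct copy_page {
            unsigned char data[PGSZ];
            bool dirty;
    };

    /*
     * Copy one source page into a freshly allocated destination page
     * and dirty it unconditionally: with no shared backing store, the
     * copy is the only valid version of the data and must eventually
     * be paged out.
     */
    static void
    copy_one_page(const struct copy_page *src_m, struct copy_page *dst_m)
    {
            memcpy(dst_m->data, src_m->data, PGSZ);
            dst_m->dirty = true;    /* regardless of write permission */
    }
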
1494 vm_object_t object;
1499 VM_OBJECT_ASSERT_WLOCKED(m->object);
1501 object = m->object;
1508 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1545 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
1579 if (endpindex > object->size)
1580 endpindex = object->size;
1584 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
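
Source lines 1579-1584 clamp the readahead run to the object's size and then allocate pages until one is already resident or allocation fails. A self-contained sketch of that loop, assuming the object fits in NPAGES and using calloc as a stand-in for vm_page_alloc():

    #include <stddef.h>
    #include <stdlib.h>

    #define NPAGES 64

    struct ra_object {
            size_t size;            /* object size in pages, <= NPAGES here */
            void *pages[NPAGES];
    };

    static void *
    toy_page_alloc(void)
    {
            return (calloc(1, 4096));       /* stand-in for vm_page_alloc() */
    }

    /*
     * Extend the cluster forward: clamp the end to the object's size,
     * then allocate until a page is already resident or allocation
     * fails.
     */
    static size_t
    fill_ahead(struct ra_object *obj, size_t tpindex, size_t endpindex)
    {
            size_t n;
            void *m;

            if (endpindex > obj->size)
                    endpindex = obj->size;
            for (n = 0; tpindex < endpindex; tpindex++, n++) {
                    if (obj->pages[tpindex] != NULL ||
                        (m = toy_page_alloc()) == NULL)
                            break;
                    obj->pages[tpindex] = m;
            }
            return (n);             /* pages added to the readahead run */
    }
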