Lines Matching refs:object

127 vm_object_t object;
165 vm_object_pip_wakeup(fs->object);
166 VM_OBJECT_WUNLOCK(fs->object);
167 if (fs->object != fs->first_object) {
186 * current object *might* contain the page.
190 #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \
258 * Find the backing store object and offset into it to begin the
297 * Make a reference to this object to prevent its disposal while we
318 * Search for the page at object/offset.
320 fs.object = fs.first_object;
324 * If the object is dead, we stop here
326 if (fs.object->flags & OBJ_DEAD) {
334 fs.m = vm_page_lookup(fs.object, fs.pindex);
359 if (fs.object != fs.first_object) {
362 VM_OBJECT_WUNLOCK(fs.object);
364 VM_OBJECT_WLOCK(fs.object);
374 if (fs.m == vm_page_lookup(fs.object,
378 vm_object_pip_wakeup(fs.object);
379 VM_OBJECT_WUNLOCK(fs.object);
404 if (TRYPAGER || fs.object == fs.first_object) {
405 if (fs.pindex >= fs.object->size) {
411 * Allocate a new page for this object/offset pair.
421 if ((fs.object->flags & OBJ_COLORED) == 0) {
422 fs.object->flags |= OBJ_COLORED;
423 fs.object->pg_color = atop(vaddr) -
429 if (fs.object->type != OBJT_VNODE &&
430 fs.object->backing_object == NULL)
432 fs.m = vm_page_alloc(fs.object, fs.pindex,
500 * fs.object and the pages are exclusive busied.
504 if (fs.object->type == OBJT_VNODE) {
505 vp = fs.object->handle;
535 ("vm_fault: vnode-backed object mapped by system map"));
541 * object as the page for this fault. If they do,
542 * then they are faulted in also into the object. The
555 vm_pager_get_pages(fs.object, marray, faultcount,
569 fs.m = vm_page_lookup(fs.object, fs.pindex);
580 * object/offset); before doing so, we must get back
581 * our object lock to preserve our invariant.
586 * If this is the top-level object, we must leave the
588 * past us, and inserting the page in that object at
611 if (fs.object != fs.first_object) {
624 * We get here if the object has a default pager (or unwiring)
627 if (fs.object == fs.first_object)
631 * Move on to the next object. Lock the next object before
634 fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
635 next_object = fs.object->backing_object;
638 * If there's no object left, fill the page in the top
639 * object with zeros.
641 if (fs.object != fs.first_object) {
642 vm_object_pip_wakeup(fs.object);
643 VM_OBJECT_WUNLOCK(fs.object);
645 fs.object = fs.first_object;
648 VM_OBJECT_WLOCK(fs.object);
664 KASSERT(fs.object != next_object,
665 ("object loop %p", next_object));
668 if (fs.object != fs.first_object)
669 vm_object_pip_wakeup(fs.object);
670 VM_OBJECT_WUNLOCK(fs.object);
671 fs.object = next_object;
678 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
684 * top-level object, we have to copy it into a new page owned by the
685 * top-level object.
687 if (fs.object != fs.first_object) {
695 * backing object has no other refs to it, and cannot
697 * move the page from the backing object to the
698 * first object. Note that we must mark the page
699 * dirty in the first object so that it will go out
705 * Only one shadow object
707 (fs.object->shadow_count == 1) &&
711 (fs.object->ref_count == 1) &&
713 * No one else can look this object up
715 (fs.object->handle == NULL) &&
717 * No other ways to look the object up
719 ((fs.object->type == OBJT_DEFAULT) ||
720 (fs.object->type == OBJT_SWAP)) &&
725 fs.object == fs.first_object->backing_object) {
734 * process's object. The page is
763 * We no longer need the old page or object.
768 * fs.object != fs.first_object due to above
771 vm_object_pip_wakeup(fs.object);
772 VM_OBJECT_WUNLOCK(fs.object);
776 fs.object = fs.first_object;
780 VM_OBJECT_WLOCK(fs.object);
832 * Check whether the protection has changed or the object has
857 vm_object_set_writeable_dirty(fs.object);
898 VM_OBJECT_WUNLOCK(fs.object);
909 VM_OBJECT_WLOCK(fs.object);
945 * faulting pindex within the first object of the shadow chain.
950 vm_object_t first_object, object;
954 object = fs->object;
955 VM_OBJECT_ASSERT_WLOCKED(object);
957 if (first_object != object) {
959 VM_OBJECT_WUNLOCK(object);
961 VM_OBJECT_WLOCK(object);
972 m = first_object != object ? fs->first_m : fs->m;
992 if (first_object != object)
1009 vm_object_t object;
1014 object = entry->object.vm_object;
1037 lobject = object;
1043 0, ("vm_fault_prefault: unaligned object offset"));
1112 * The object lock is not held here.
1207 * Create new shadow object backing dst_entry with private copy of
1222 vm_object_t backing_object, dst_object, object, src_object;
1236 src_object = src_entry->object.vm_object;
1241 * Create the top-level object for the destination entry. (Doesn't
1252 KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1254 dst_entry->object.vm_object = dst_object;
1283 * range, copying each page from the source object to the
1284 * destination object. Since the source is wired, those pages
1286 * Since the destination object does not share any backing storage
1287 * with the source object, all of its pages must be dirtied,
1295 * Allocate a page in the destination object.
1308 * Find the page in the source object, and copy it in.
1313 object = src_object;
1315 while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
1317 (backing_object = object->backing_object) != NULL) {
1322 pindex += OFF_TO_IDX(object->backing_object_offset);
1323 VM_OBJECT_RUNLOCK(object);
1324 object = backing_object;
1329 VM_OBJECT_RUNLOCK(object);
1392 vm_object_t object;
1397 VM_OBJECT_ASSERT_WLOCKED(m->object);
1399 object = m->object;
1406 if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
1443 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
1477 if (endpindex > object->size)
1478 endpindex = object->size;
1482 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
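
Several of the matches above (the next_object walk around lines 634-671 and the lookup loop around lines 1315-1324) show the same recurring pattern: when the page is not resident in the current object, translate the page index by the backing object's offset and descend the shadow chain. The fragment below is a minimal userland sketch of that walk only; the fake_object/fake_page types and shadow_chain_lookup name are illustrative stand-ins, not the kernel's vm_object/vm_page API, and all locking and paging-in-progress accounting is omitted.

#include <stddef.h>

struct fake_page;

struct fake_object {
	struct fake_object *backing_object;	/* next object in the shadow chain */
	size_t backing_offset_idx;		/* page-index offset into the backer */
	struct fake_page *(*lookup)(struct fake_object *, size_t);
};

/*
 * Walk from the top object down the shadow chain until a page is found
 * at the (translated) pindex, or the chain runs out.
 */
static struct fake_page *
shadow_chain_lookup(struct fake_object *object, size_t pindex)
{
	struct fake_page *m;

	while ((m = object->lookup(object, pindex)) == NULL &&
	    object->backing_object != NULL) {
		/* Translate the index into the backing object's space. */
		pindex += object->backing_offset_idx;
		object = object->backing_object;
	}
	return (m);
}

In the real fault handler the same descent additionally drops the previous object's lock, acquires the next one, and falls back to zero-filling a page in the first object when the chain is exhausted, as the matches for lines 638-671 above indicate.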