1/*-
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53 *  School of Computer Science
54 *  Carnegie Mellon University
55 *  Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61/*
62 *	Virtual memory object module.
63 */
64
65#include <sys/cdefs.h>
66__FBSDID("$FreeBSD$");
67
68#include "opt_vm.h"
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/lock.h>
73#include <sys/mman.h>
74#include <sys/mount.h>
75#include <sys/kernel.h>
76#include <sys/sysctl.h>
77#include <sys/mutex.h>
78#include <sys/proc.h>		/* for curproc, pageproc */
79#include <sys/socket.h>
80#include <sys/resourcevar.h>
81#include <sys/vnode.h>
82#include <sys/vmmeter.h>
83#include <sys/sx.h>
84
85#include <vm/vm.h>
86#include <vm/vm_param.h>
87#include <vm/pmap.h>
88#include <vm/vm_map.h>
89#include <vm/vm_object.h>
90#include <vm/vm_page.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_pager.h>
93#include <vm/swap_pager.h>
94#include <vm/vm_kern.h>
95#include <vm/vm_extern.h>
96#include <vm/vm_reserv.h>
97#include <vm/uma.h>
98
99static int old_msync;
100SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
101    "Use old (insecure) msync behavior");
102
103static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
104		    int pagerflags, int flags, boolean_t *clearobjflags,
105		    boolean_t *eio);
106static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
107		    boolean_t *clearobjflags);
108static void	vm_object_qcollapse(vm_object_t object);
109static void	vm_object_vndeallocate(vm_object_t object);
110
111/*
112 *	Virtual memory objects maintain the actual data
113 *	associated with allocated virtual memory.  A given
114 *	page of memory exists within exactly one object.
115 *
116 *	An object is only deallocated when all "references"
117 *	are given up.  Only one "reference" to a given
118 *	region of an object should be writeable.
119 *
120 *	Associated with each object is a list of all resident
121 *	memory pages belonging to that object; this list is
122 *	maintained by the "vm_page" module, and locked by the object's
123 *	lock.
124 *
125 *	Each object also records a "pager" routine which is
126 *	used to retrieve (and store) pages to the proper backing
127 *	storage.  In addition, objects may be backed by other
128 *	objects from which they were virtual-copied.
129 *
130 *	The only items within the object structure which are
131 *	modified after time of creation are:
132 *		reference count		locked by object's lock
133 *		pager routine		locked by object's lock
134 *
135 */
136
137struct object_q vm_object_list;
138struct mtx vm_object_list_mtx;	/* lock for object list and count */
139
140struct vm_object kernel_object_store;
141struct vm_object kmem_object_store;
142
143static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
144    "VM object stats");
145
146static long object_collapses;
147SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
148    &object_collapses, 0, "VM object collapses");
149
150static long object_bypasses;
151SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
152    &object_bypasses, 0, "VM object bypasses");
153
154static uma_zone_t obj_zone;
155
156static int vm_object_zinit(void *mem, int size, int flags);
157
158#ifdef INVARIANTS
159static void vm_object_zdtor(void *mem, int size, void *arg);
160
161static void
162vm_object_zdtor(void *mem, int size, void *arg)
163{
164	vm_object_t object;
165
166	object = (vm_object_t)mem;
167	KASSERT(TAILQ_EMPTY(&object->memq),
168	    ("object %p has resident pages",
169	    object));
170#if VM_NRESERVLEVEL > 0
171	KASSERT(LIST_EMPTY(&object->rvq),
172	    ("object %p has reservations",
173	    object));
174#endif
175	KASSERT(object->cache == NULL,
176	    ("object %p has cached pages",
177	    object));
178	KASSERT(object->paging_in_progress == 0,
179	    ("object %p paging_in_progress = %d",
180	    object, object->paging_in_progress));
181	KASSERT(object->resident_page_count == 0,
182	    ("object %p resident_page_count = %d",
183	    object, object->resident_page_count));
184	KASSERT(object->shadow_count == 0,
185	    ("object %p shadow_count = %d",
186	    object, object->shadow_count));
187}
188#endif
189
190static int
191vm_object_zinit(void *mem, int size, int flags)
192{
193	vm_object_t object;
194
195	object = (vm_object_t)mem;
196	bzero(&object->mtx, sizeof(object->mtx));
197	VM_OBJECT_LOCK_INIT(object, "standard object");
198
199	/* These are true for any object that has been freed */
200	object->paging_in_progress = 0;
201	object->resident_page_count = 0;
202	object->shadow_count = 0;
203	return (0);
204}
205
206void
207_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
208{
209
210	TAILQ_INIT(&object->memq);
211	LIST_INIT(&object->shadow_head);
212
213	object->root = NULL;
214	object->type = type;
215	object->size = size;
216	object->generation = 1;
217	object->ref_count = 1;
218	object->memattr = VM_MEMATTR_DEFAULT;
219	object->flags = 0;
220	object->cred = NULL;
221	object->charge = 0;
222	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
223		object->flags = OBJ_ONEMAPPING;
224	object->pg_color = 0;
225	object->handle = NULL;
226	object->backing_object = NULL;
227	object->backing_object_offset = (vm_ooffset_t) 0;
228#if VM_NRESERVLEVEL > 0
229	LIST_INIT(&object->rvq);
230#endif
231	object->cache = NULL;
232
233	mtx_lock(&vm_object_list_mtx);
234	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
235	mtx_unlock(&vm_object_list_mtx);
236}
237
238/*
239 *	vm_object_init:
240 *
241 *	Initialize the VM objects module.
242 */
243void
244vm_object_init(void)
245{
246	TAILQ_INIT(&vm_object_list);
247	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
248
249	VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
250	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
251	    kernel_object);
252#if VM_NRESERVLEVEL > 0
253	kernel_object->flags |= OBJ_COLORED;
254	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
255#endif
256
257	VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
258	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
259	    kmem_object);
260#if VM_NRESERVLEVEL > 0
261	kmem_object->flags |= OBJ_COLORED;
262	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
263#endif
264
265	/*
266	 * The lock portion of struct vm_object must be type stable due
267	 * to vm_pageout_fallback_object_lock locking a vm object
268	 * without holding any references to it.
269	 */
270	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
271#ifdef INVARIANTS
272	    vm_object_zdtor,
273#else
274	    NULL,
275#endif
276	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
277}
278
279void
280vm_object_clear_flag(vm_object_t object, u_short bits)
281{
282
283	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
284	object->flags &= ~bits;
285}
286
287/*
288 *	Sets the default memory attribute for the specified object.  Pages
289 *	that are allocated to this object are by default assigned this memory
290 *	attribute.
291 *
292 *	Presently, this function must be called before any pages are allocated
293 *	to the object.  In the future, this requirement may be relaxed for
294 *	"default" and "swap" objects.
295 */
296int
297vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
298{
299
300	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
301	switch (object->type) {
302	case OBJT_DEFAULT:
303	case OBJT_DEVICE:
304	case OBJT_PHYS:
305	case OBJT_SG:
306	case OBJT_SWAP:
307	case OBJT_VNODE:
308		if (!TAILQ_EMPTY(&object->memq))
309			return (KERN_FAILURE);
310		break;
311	case OBJT_DEAD:
312		return (KERN_INVALID_ARGUMENT);
313	}
314	object->memattr = memattr;
315	return (KERN_SUCCESS);
316}
317
318void
319vm_object_pip_add(vm_object_t object, short i)
320{
321
322	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
323	object->paging_in_progress += i;
324}
325
326void
327vm_object_pip_subtract(vm_object_t object, short i)
328{
329
330	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
331	object->paging_in_progress -= i;
332}
333
334void
335vm_object_pip_wakeup(vm_object_t object)
336{
337
338	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
339	object->paging_in_progress--;
340	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
341		vm_object_clear_flag(object, OBJ_PIPWNT);
342		wakeup(object);
343	}
344}
345
346void
347vm_object_pip_wakeupn(vm_object_t object, short i)
348{
349
350	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
351	if (i)
352		object->paging_in_progress -= i;
353	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
354		vm_object_clear_flag(object, OBJ_PIPWNT);
355		wakeup(object);
356	}
357}
358
359void
360vm_object_pip_wait(vm_object_t object, char *waitid)
361{
362
363	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
364	while (object->paging_in_progress) {
365		object->flags |= OBJ_PIPWNT;
366		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
367	}
368}
369
370/*
371 *	vm_object_allocate:
372 *
373 *	Returns a new object with the given size.
374 */
375vm_object_t
376vm_object_allocate(objtype_t type, vm_pindex_t size)
377{
378	vm_object_t object;
379
380	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
381	_vm_object_allocate(type, size, object);
382	return (object);
383}
384
385
386/*
387 *	vm_object_reference:
388 *
389 *	Gets another reference to the given object.  Note: OBJ_DEAD
390 *	objects can be referenced during final cleaning.
391 */
392void
393vm_object_reference(vm_object_t object)
394{
395	if (object == NULL)
396		return;
397	VM_OBJECT_LOCK(object);
398	vm_object_reference_locked(object);
399	VM_OBJECT_UNLOCK(object);
400}
401
402/*
403 *	vm_object_reference_locked:
404 *
405 *	Gets another reference to the given object.
406 *
407 *	The object must be locked.
408 */
409void
410vm_object_reference_locked(vm_object_t object)
411{
412	struct vnode *vp;
413
414	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
415	object->ref_count++;
416	if (object->type == OBJT_VNODE) {
417		vp = object->handle;
418		vref(vp);
419	}
420}
421
422/*
423 * Handle deallocating an object of type OBJT_VNODE.
424 */
425static void
426vm_object_vndeallocate(vm_object_t object)
427{
428	struct vnode *vp = (struct vnode *) object->handle;
429
430	VFS_ASSERT_GIANT(vp->v_mount);
431	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
432	KASSERT(object->type == OBJT_VNODE,
433	    ("vm_object_vndeallocate: not a vnode object"));
434	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
435#ifdef INVARIANTS
436	if (object->ref_count == 0) {
437		vprint("vm_object_vndeallocate", vp);
438		panic("vm_object_vndeallocate: bad object reference count");
439	}
440#endif
441
442	if (object->ref_count > 1) {
443		object->ref_count--;
444		VM_OBJECT_UNLOCK(object);
445		/* vrele may need the vnode lock. */
446		vrele(vp);
447	} else {
448		vhold(vp);
449		VM_OBJECT_UNLOCK(object);
450		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
451		vdrop(vp);
452		VM_OBJECT_LOCK(object);
453		object->ref_count--;
454		if (object->type == OBJT_DEAD) {
455			VM_OBJECT_UNLOCK(object);
456			VOP_UNLOCK(vp, 0);
457		} else {
458			if (object->ref_count == 0)
459				VOP_UNSET_TEXT(vp);
460			VM_OBJECT_UNLOCK(object);
461			vput(vp);
462		}
463	}
464}
465
466/*
467 *	vm_object_deallocate:
468 *
469 *	Release a reference to the specified object,
470 *	gained either through a vm_object_allocate
471 *	or a vm_object_reference call.  When all references
472 *	are gone, storage associated with this object
473 *	may be relinquished.
474 *
475 *	No object may be locked.
476 */
477void
478vm_object_deallocate(vm_object_t object)
479{
480	vm_object_t temp;
481
482	while (object != NULL) {
483		int vfslocked;
484
485		vfslocked = 0;
486	restart:
487		VM_OBJECT_LOCK(object);
488		if (object->type == OBJT_VNODE) {
489			struct vnode *vp = (struct vnode *) object->handle;
490
491			/*
492			 * Conditionally acquire Giant for a vnode-backed
493			 * object.  We have to be careful since the type of
494			 * a vnode object can change while the object is
495			 * unlocked.
496			 */
497			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
498				vfslocked = 1;
499				if (!mtx_trylock(&Giant)) {
500					VM_OBJECT_UNLOCK(object);
501					mtx_lock(&Giant);
502					goto restart;
503				}
504			}
505			vm_object_vndeallocate(object);
506			VFS_UNLOCK_GIANT(vfslocked);
507			return;
508		} else
509			/*
510			 * This is to handle the case that the object
511			 * changed type while we dropped its lock to
512			 * obtain Giant.
513			 */
514			VFS_UNLOCK_GIANT(vfslocked);
515
516		KASSERT(object->ref_count != 0,
517			("vm_object_deallocate: object deallocated too many times: %d", object->type));
518
519		/*
520		 * If the reference count goes to 0 we start calling
521		 * vm_object_terminate() on the object chain.
522		 * A ref count of 1 may be a special case depending on the
523		 * shadow count being 0 or 1.
524		 */
525		object->ref_count--;
526		if (object->ref_count > 1) {
527			VM_OBJECT_UNLOCK(object);
528			return;
529		} else if (object->ref_count == 1) {
530			if (object->shadow_count == 0 &&
531			    object->handle == NULL &&
532			    (object->type == OBJT_DEFAULT ||
533			     object->type == OBJT_SWAP)) {
534				vm_object_set_flag(object, OBJ_ONEMAPPING);
535			} else if ((object->shadow_count == 1) &&
536			    (object->handle == NULL) &&
537			    (object->type == OBJT_DEFAULT ||
538			     object->type == OBJT_SWAP)) {
539				vm_object_t robject;
540
541				robject = LIST_FIRST(&object->shadow_head);
542				KASSERT(robject != NULL,
543				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
544					 object->ref_count,
545					 object->shadow_count));
546				if (!VM_OBJECT_TRYLOCK(robject)) {
547					/*
548					 * Avoid a potential deadlock.
549					 */
550					object->ref_count++;
551					VM_OBJECT_UNLOCK(object);
552					/*
553					 * More likely than not the thread
554					 * holding robject's lock has lower
555					 * priority than the current thread.
556					 * Let the lower priority thread run.
557					 */
558					pause("vmo_de", 1);
559					continue;
560				}
561				/*
562				 * Collapse object into its shadow unless its
563				 * shadow is dead.  In that case, object will
564				 * be deallocated by the thread that is
565				 * deallocating its shadow.
566				 */
567				if ((robject->flags & OBJ_DEAD) == 0 &&
568				    (robject->handle == NULL) &&
569				    (robject->type == OBJT_DEFAULT ||
570				     robject->type == OBJT_SWAP)) {
571
572					robject->ref_count++;
573retry:
574					if (robject->paging_in_progress) {
575						VM_OBJECT_UNLOCK(object);
576						vm_object_pip_wait(robject,
577						    "objde1");
578						temp = robject->backing_object;
579						if (object == temp) {
580							VM_OBJECT_LOCK(object);
581							goto retry;
582						}
583					} else if (object->paging_in_progress) {
584						VM_OBJECT_UNLOCK(robject);
585						object->flags |= OBJ_PIPWNT;
586						msleep(object,
587						    VM_OBJECT_MTX(object),
588						    PDROP | PVM, "objde2", 0);
589						VM_OBJECT_LOCK(robject);
590						temp = robject->backing_object;
591						if (object == temp) {
592							VM_OBJECT_LOCK(object);
593							goto retry;
594						}
595					} else
596						VM_OBJECT_UNLOCK(object);
597
598					if (robject->ref_count == 1) {
599						robject->ref_count--;
600						object = robject;
601						goto doterm;
602					}
603					object = robject;
604					vm_object_collapse(object);
605					VM_OBJECT_UNLOCK(object);
606					continue;
607				}
608				VM_OBJECT_UNLOCK(robject);
609			}
610			VM_OBJECT_UNLOCK(object);
611			return;
612		}
613doterm:
614		temp = object->backing_object;
615		if (temp != NULL) {
616			VM_OBJECT_LOCK(temp);
617			LIST_REMOVE(object, shadow_list);
618			temp->shadow_count--;
619			VM_OBJECT_UNLOCK(temp);
620			object->backing_object = NULL;
621		}
622		/*
623		 * Don't double-terminate, we could be in a termination
624		 * recursion due to the terminate having to sync data
625		 * to disk.
626		 */
627		if ((object->flags & OBJ_DEAD) == 0)
628			vm_object_terminate(object);
629		else
630			VM_OBJECT_UNLOCK(object);
631		object = temp;
632	}
633}
634
635/*
636 *	vm_object_destroy removes the object from the global object list
637 *      and frees the space for the object.
638 */
639void
640vm_object_destroy(vm_object_t object)
641{
642
643	/*
644	 * Remove the object from the global object list.
645	 */
646	mtx_lock(&vm_object_list_mtx);
647	TAILQ_REMOVE(&vm_object_list, object, object_list);
648	mtx_unlock(&vm_object_list_mtx);
649
650	/*
651	 * Release the allocation charge.
652	 */
653	if (object->cred != NULL) {
654		KASSERT(object->type == OBJT_DEFAULT ||
655		    object->type == OBJT_SWAP,
656		    ("vm_object_terminate: non-swap obj %p has cred",
657		     object));
658		swap_release_by_cred(object->charge, object->cred);
659		object->charge = 0;
660		crfree(object->cred);
661		object->cred = NULL;
662	}
663
664	/*
665	 * Free the space for the object.
666	 */
667	uma_zfree(obj_zone, object);
668}
669
670/*
671 *	vm_object_terminate actually destroys the specified object, freeing
672 *	up all previously used resources.
673 *
674 *	The object must be locked.
675 *	This routine may block.
676 */
677void
678vm_object_terminate(vm_object_t object)
679{
680	vm_page_t p, p_next;
681
682	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
683
684	/*
685	 * Make sure no one uses us.
686	 */
687	vm_object_set_flag(object, OBJ_DEAD);
688
689	/*
690	 * wait for the pageout daemon to be done with the object
691	 */
692	vm_object_pip_wait(object, "objtrm");
693
694	KASSERT(!object->paging_in_progress,
695		("vm_object_terminate: pageout in progress"));
696
697	/*
698	 * Clean and free the pages, as appropriate. All references to the
699	 * object are gone, so we don't need to lock it.
700	 */
701	if (object->type == OBJT_VNODE) {
702		struct vnode *vp = (struct vnode *)object->handle;
703
704		/*
705		 * Clean pages and flush buffers.
706		 */
707		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
708		VM_OBJECT_UNLOCK(object);
709
710		vinvalbuf(vp, V_SAVE, 0, 0);
711
712		VM_OBJECT_LOCK(object);
713	}
714
715	KASSERT(object->ref_count == 0,
716		("vm_object_terminate: object with references, ref_count=%d",
717		object->ref_count));
718
719	/*
720	 * Free any remaining pageable pages.  This also removes them from the
721	 * paging queues.  However, don't free wired pages, just remove them
722	 * from the object.  Rather than incrementally removing each page from
723	 * the object, the page and object are reset to any empty state.
724	 */
725	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
726		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
727		    ("vm_object_terminate: freeing busy page %p", p));
728		vm_page_lock(p);
729		/*
730		 * Optimize the page's removal from the object by resetting
731		 * its "object" field.  Specifically, if the page is not
732		 * wired, then the effect of this assignment is that
733		 * vm_page_free()'s call to vm_page_remove() will return
734		 * immediately without modifying the page or the object.
735		 */
736		p->object = NULL;
737		if (p->wire_count == 0) {
738			vm_page_free(p);
739			PCPU_INC(cnt.v_pfree);
740		}
741		vm_page_unlock(p);
742	}
743	/*
744	 * If the object contained any pages, then reset it to an empty state.
745	 * None of the object's fields, including "resident_page_count", were
746	 * modified by the preceding loop.
747	 */
748	if (object->resident_page_count != 0) {
749		object->root = NULL;
750		TAILQ_INIT(&object->memq);
751		object->resident_page_count = 0;
752		if (object->type == OBJT_VNODE)
753			vdrop(object->handle);
754	}
755
756#if VM_NRESERVLEVEL > 0
757	if (__predict_false(!LIST_EMPTY(&object->rvq)))
758		vm_reserv_break_all(object);
759#endif
760	if (__predict_false(object->cache != NULL))
761		vm_page_cache_free(object, 0, 0);
762
763	/*
764	 * Let the pager know object is dead.
765	 */
766	vm_pager_deallocate(object);
767	VM_OBJECT_UNLOCK(object);
768
769	vm_object_destroy(object);
770}
771
772/*
773 * Make the page read-only so that we can clear the object flags.  However, if
774 * this is a nosync mmap then the object is likely to stay dirty so do not
775 * mess with the page and do not clear the object flags.  Returns TRUE if the
776 * page should be flushed, and FALSE otherwise.
777 */
778static boolean_t
779vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
780{
781
782	/*
783	 * If we have been asked to skip nosync pages and this is a
784	 * nosync page, skip it.  Note that the object flags were not
785	 * cleared in this case so we do not have to set them.
786	 */
787	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
788		*clearobjflags = FALSE;
789		return (FALSE);
790	} else {
791		pmap_remove_write(p);
792		return (p->dirty != 0);
793	}
794}
795
796/*
797 *	vm_object_page_clean
798 *
799 *	Clean all dirty pages in the specified range of object.  Leaves page
800 * 	on whatever queue it is currently on.   If NOSYNC is set then do not
801 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
802 *	leaving the object dirty.
803 *
804 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
805 *	synchronous clustering mode implementation.
806 *
807 *	Odd semantics: if start == end, we clean everything.
808 *
809 *	The object must be locked.
810 *
811 *	Returns FALSE if some page from the range was not written, as
812 *	reported by the pager, and TRUE otherwise.
813 */
814boolean_t
815vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
816    int flags)
817{
818	vm_page_t np, p;
819	vm_pindex_t pi, tend, tstart;
820	int curgeneration, n, pagerflags;
821	boolean_t clearobjflags, eio, res;
822
823	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
824	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
825	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
826	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
827	    object->resident_page_count == 0)
828		return (TRUE);
829
830	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
831	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
832	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;
833
834	tstart = OFF_TO_IDX(start);
835	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
836	clearobjflags = tstart == 0 && tend >= object->size;
837	res = TRUE;
838
839rescan:
840	curgeneration = object->generation;
841
842	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
843		pi = p->pindex;
844		if (pi >= tend)
845			break;
846		np = TAILQ_NEXT(p, listq);
847		if (p->valid == 0)
848			continue;
849		if (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
850			if (object->generation != curgeneration) {
851				if ((flags & OBJPC_SYNC) != 0)
852					goto rescan;
853				else
854					clearobjflags = FALSE;
855			}
856			np = vm_page_find_least(object, pi);
857			continue;
858		}
859		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
860			continue;
861
862		n = vm_object_page_collect_flush(object, p, pagerflags,
863		    flags, &clearobjflags, &eio);
864		if (eio) {
865			res = FALSE;
866			clearobjflags = FALSE;
867		}
868		if (object->generation != curgeneration) {
869			if ((flags & OBJPC_SYNC) != 0)
870				goto rescan;
871			else
872				clearobjflags = FALSE;
873		}
874
875		/*
876		 * If the VOP_PUTPAGES() did a truncated write, so
877		 * that even the first page of the run is not fully
878		 * written, vm_pageout_flush() returns 0 as the run
879		 * length.  Since the condition that caused truncated
880		 * write may be permanent, e.g. exhausted free space,
881		 * accepting n == 0 would cause an infinite loop.
882		 *
883		 * Forwarding the iterator leaves the unwritten page
884		 * behind, but there is not much we can do there if
885		 * filesystem refuses to write it.
886		 */
887		if (n == 0) {
888			n = 1;
889			clearobjflags = FALSE;
890		}
891		np = vm_page_find_least(object, pi + n);
892	}
893#if 0
894	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
895#endif
896
897	if (clearobjflags)
898		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
899	return (res);
900}
901
902static int
903vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
904    int flags, boolean_t *clearobjflags, boolean_t *eio)
905{
906	vm_page_t ma[vm_pageout_page_count], p_first, tp;
907	int count, i, mreq, runlen;
908
909	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
910	vm_page_lock_assert(p, MA_NOTOWNED);
911	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
912
913	count = 1;
914	mreq = 0;
915
916	for (tp = p; count < vm_pageout_page_count; count++) {
917		tp = vm_page_next(tp);
918		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
919			break;
920		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
921			break;
922	}
923
924	for (p_first = p; count < vm_pageout_page_count; count++) {
925		tp = vm_page_prev(p_first);
926		if (tp == NULL || tp->busy != 0 || (tp->oflags & VPO_BUSY) != 0)
927			break;
928		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
929			break;
930		p_first = tp;
931		mreq++;
932	}
933
934	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
935		ma[i] = tp;
936
937	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
938	return (runlen);
939}
940
941/*
942 * Note that there is absolutely no sense in writing out
943 * anonymous objects, so we track down the vnode object
944 * to write out.
945 * We invalidate (remove) all pages from the address space
946 * for semantic correctness.
947 *
948 * If the backing object is a device object with unmanaged pages, then any
949 * mappings to the specified range of pages must be removed before this
950 * function is called.
951 *
952 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
953 * may start out with a NULL object.
954 */
955boolean_t
956vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
957    boolean_t syncio, boolean_t invalidate)
958{
959	vm_object_t backing_object;
960	struct vnode *vp;
961	struct mount *mp;
962	int error, flags, fsync_after;
963	boolean_t res;
964
965	if (object == NULL)
966		return (TRUE);
967	res = TRUE;
968	error = 0;
969	VM_OBJECT_LOCK(object);
970	while ((backing_object = object->backing_object) != NULL) {
971		VM_OBJECT_LOCK(backing_object);
972		offset += object->backing_object_offset;
973		VM_OBJECT_UNLOCK(object);
974		object = backing_object;
975		if (object->size < OFF_TO_IDX(offset + size))
976			size = IDX_TO_OFF(object->size) - offset;
977	}
978	/*
979	 * Flush pages if writing is allowed, invalidate them
980	 * if invalidation requested.  Pages undergoing I/O
981	 * will be ignored by vm_object_page_remove().
982	 *
983	 * We cannot lock the vnode and then wait for paging
984	 * to complete without deadlocking against vm_fault.
985	 * Instead we simply call vm_object_page_remove() and
986	 * allow it to block internally on a page-by-page
987	 * basis when it encounters pages undergoing async
988	 * I/O.
989	 */
990	if (object->type == OBJT_VNODE &&
991	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
992		int vfslocked;
993		vp = object->handle;
994		VM_OBJECT_UNLOCK(object);
995		(void) vn_start_write(vp, &mp, V_WAIT);
996		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
997		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
998		if (syncio && !invalidate && offset == 0 &&
999		    OFF_TO_IDX(size) == object->size) {
1000			/*
1001			 * If syncing the whole mapping of the file,
1002			 * it is faster to schedule all the writes in
1003			 * async mode, also allowing the clustering,
1004			 * and then wait for i/o to complete.
1005			 */
1006			flags = 0;
1007			fsync_after = TRUE;
1008		} else {
1009			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1010			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
1011			fsync_after = FALSE;
1012		}
1013		VM_OBJECT_LOCK(object);
1014		res = vm_object_page_clean(object, offset, offset + size,
1015		    flags);
1016		VM_OBJECT_UNLOCK(object);
1017		if (fsync_after)
1018			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
1019		VOP_UNLOCK(vp, 0);
1020		VFS_UNLOCK_GIANT(vfslocked);
1021		vn_finished_write(mp);
1022		if (error != 0)
1023			res = FALSE;
1024		VM_OBJECT_LOCK(object);
1025	}
1026	if ((object->type == OBJT_VNODE ||
1027	     object->type == OBJT_DEVICE) && invalidate) {
1028		if (object->type == OBJT_DEVICE)
1029			/*
1030			 * The option OBJPR_NOTMAPPED must be passed here
1031			 * because vm_object_page_remove() cannot remove
1032			 * unmanaged mappings.
1033			 */
1034			flags = OBJPR_NOTMAPPED;
1035		else if (old_msync)
1036			flags = OBJPR_NOTWIRED;
1037		else
1038			flags = OBJPR_CLEANONLY | OBJPR_NOTWIRED;
1039		vm_object_page_remove(object, OFF_TO_IDX(offset),
1040		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
1041	}
1042	VM_OBJECT_UNLOCK(object);
1043	return (res);
1044}
1045
1046/*
1047 *	vm_object_madvise:
1048 *
1049 *	Implements the madvise function at the object/page level.
1050 *
1051 *	MADV_WILLNEED	(any object)
1052 *
1053 *	    Activate the specified pages if they are resident.
1054 *
1055 *	MADV_DONTNEED	(any object)
1056 *
1057 *	    Deactivate the specified pages if they are resident.
1058 *
1059 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
1060 *			 OBJ_ONEMAPPING only)
1061 *
1062 *	    Deactivate and clean the specified pages if they are
1063 *	    resident.  This permits the process to reuse the pages
1064 *	    without faulting or the kernel to reclaim the pages
1065 *	    without I/O.
1066 */
1067void
1068vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
1069    int advise)
1070{
1071	vm_pindex_t tpindex;
1072	vm_object_t backing_object, tobject;
1073	vm_page_t m;
1074
1075	if (object == NULL)
1076		return;
1077	VM_OBJECT_LOCK(object);
1078	/*
1079	 * Locate and adjust resident pages
1080	 */
1081	for (; pindex < end; pindex += 1) {
1082relookup:
1083		tobject = object;
1084		tpindex = pindex;
1085shadowlookup:
1086		/*
1087		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1088		 * and those pages must be OBJ_ONEMAPPING.
1089		 */
1090		if (advise == MADV_FREE) {
1091			if ((tobject->type != OBJT_DEFAULT &&
1092			     tobject->type != OBJT_SWAP) ||
1093			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
1094				goto unlock_tobject;
1095			}
1096		} else if (tobject->type == OBJT_PHYS)
1097			goto unlock_tobject;
1098		m = vm_page_lookup(tobject, tpindex);
1099		if (m == NULL && advise == MADV_WILLNEED) {
1100			/*
1101			 * If the page is cached, reactivate it.
1102			 */
1103			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
1104			    VM_ALLOC_NOBUSY);
1105		}
1106		if (m == NULL) {
1107			/*
1108			 * There may be swap even if there is no backing page
1109			 */
1110			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1111				swap_pager_freespace(tobject, tpindex, 1);
1112			/*
1113			 * next object
1114			 */
1115			backing_object = tobject->backing_object;
1116			if (backing_object == NULL)
1117				goto unlock_tobject;
1118			VM_OBJECT_LOCK(backing_object);
1119			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1120			if (tobject != object)
1121				VM_OBJECT_UNLOCK(tobject);
1122			tobject = backing_object;
1123			goto shadowlookup;
1124		} else if (m->valid != VM_PAGE_BITS_ALL)
1125			goto unlock_tobject;
1126		/*
1127		 * If the page is not in a normal state, skip it.
1128		 */
1129		vm_page_lock(m);
1130		if (m->hold_count != 0 || m->wire_count != 0) {
1131			vm_page_unlock(m);
1132			goto unlock_tobject;
1133		}
1134		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1135		    ("vm_object_madvise: page %p is fictitious", m));
1136		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1137		    ("vm_object_madvise: page %p is not managed", m));
1138		if ((m->oflags & VPO_BUSY) || m->busy) {
1139			if (advise == MADV_WILLNEED) {
1140				/*
1141				 * Reference the page before unlocking and
1142				 * sleeping so that the page daemon is less
1143				 * likely to reclaim it.
1144				 */
1145				vm_page_aflag_set(m, PGA_REFERENCED);
1146			}
1147			vm_page_unlock(m);
1148			if (object != tobject)
1149				VM_OBJECT_UNLOCK(object);
1150			m->oflags |= VPO_WANTED;
1151			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo",
1152			    0);
1153			VM_OBJECT_LOCK(object);
1154  			goto relookup;
1155		}
1156		if (advise == MADV_WILLNEED) {
1157			vm_page_activate(m);
1158		} else if (advise == MADV_DONTNEED) {
1159			vm_page_dontneed(m);
1160		} else if (advise == MADV_FREE) {
1161			/*
1162			 * Mark the page clean.  This will allow the page
1163			 * to be freed up by the system.  However, such pages
1164			 * are often reused quickly by malloc()/free()
1165			 * so we do not do anything that would cause
1166			 * a page fault if we can help it.
1167			 *
1168			 * Specifically, we do not try to actually free
1169			 * the page now nor do we try to put it in the
1170			 * cache (which would cause a page fault on reuse).
1171			 *
1172			 * But we do make the page is freeable as we
1173			 * can without actually taking the step of unmapping
1174			 * it.
1175			 */
1176			pmap_clear_modify(m);
1177			m->dirty = 0;
1178			m->act_count = 0;
1179			vm_page_dontneed(m);
1180		}
1181		vm_page_unlock(m);
1182		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1183			swap_pager_freespace(tobject, tpindex, 1);
1184unlock_tobject:
1185		if (tobject != object)
1186			VM_OBJECT_UNLOCK(tobject);
1187	}
1188	VM_OBJECT_UNLOCK(object);
1189}
1190
1191/*
1192 *	vm_object_shadow:
1193 *
1194 *	Create a new object which is backed by the
1195 *	specified existing object range.  The source
1196 *	object reference is deallocated.
1197 *
1198 *	The new object and offset into that object
1199 *	are returned in the source parameters.
1200 */
1201void
1202vm_object_shadow(
1203	vm_object_t *object,	/* IN/OUT */
1204	vm_ooffset_t *offset,	/* IN/OUT */
1205	vm_size_t length)
1206{
1207	vm_object_t source;
1208	vm_object_t result;
1209
1210	source = *object;
1211
1212	/*
1213	 * Don't create the new object if the old object isn't shared.
1214	 */
1215	if (source != NULL) {
1216		VM_OBJECT_LOCK(source);
1217		if (source->ref_count == 1 &&
1218		    source->handle == NULL &&
1219		    (source->type == OBJT_DEFAULT ||
1220		     source->type == OBJT_SWAP)) {
1221			VM_OBJECT_UNLOCK(source);
1222			return;
1223		}
1224		VM_OBJECT_UNLOCK(source);
1225	}
1226
1227	/*
1228	 * Allocate a new object with the given length.
1229	 */
1230	result = vm_object_allocate(OBJT_DEFAULT, atop(length));
1231
1232	/*
1233	 * The new object shadows the source object, adding a reference to it.
1234	 * Our caller changes his reference to point to the new object,
1235	 * removing a reference to the source object.  Net result: no change
1236	 * of reference count.
1237	 *
1238	 * Try to optimize the result object's page color when shadowing
1239	 * in order to maintain page coloring consistency in the combined
1240	 * shadowed object.
1241	 */
1242	result->backing_object = source;
1243	/*
1244	 * Store the offset into the source object, and fix up the offset into
1245	 * the new object.
1246	 */
1247	result->backing_object_offset = *offset;
1248	if (source != NULL) {
1249		VM_OBJECT_LOCK(source);
1250		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1251		source->shadow_count++;
1252#if VM_NRESERVLEVEL > 0
1253		result->flags |= source->flags & OBJ_COLORED;
1254		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1255		    ((1 << (VM_NFREEORDER - 1)) - 1);
1256#endif
1257		VM_OBJECT_UNLOCK(source);
1258	}
1259
1260
1261	/*
1262	 * Return the new things
1263	 */
1264	*offset = 0;
1265	*object = result;
1266}
1267
1268/*
1269 *	vm_object_split:
1270 *
1271 * Split the pages in a map entry into a new object.  This affords
1272 * easier removal of unused pages, and keeps object inheritance from
1273 * being a negative impact on memory usage.
1274 */
1275void
1276vm_object_split(vm_map_entry_t entry)
1277{
1278	vm_page_t m, m_next;
1279	vm_object_t orig_object, new_object, source;
1280	vm_pindex_t idx, offidxstart;
1281	vm_size_t size;
1282
1283	orig_object = entry->object.vm_object;
1284	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1285		return;
1286	if (orig_object->ref_count <= 1)
1287		return;
1288	VM_OBJECT_UNLOCK(orig_object);
1289
1290	offidxstart = OFF_TO_IDX(entry->offset);
1291	size = atop(entry->end - entry->start);
1292
1293	/*
1294	 * If swap_pager_copy() is later called, it will convert new_object
1295	 * into a swap object.
1296	 */
1297	new_object = vm_object_allocate(OBJT_DEFAULT, size);
1298
1299	/*
1300	 * At this point, the new object is still private, so the order in
1301	 * which the original and new objects are locked does not matter.
1302	 */
1303	VM_OBJECT_LOCK(new_object);
1304	VM_OBJECT_LOCK(orig_object);
1305	source = orig_object->backing_object;
1306	if (source != NULL) {
1307		VM_OBJECT_LOCK(source);
1308		if ((source->flags & OBJ_DEAD) != 0) {
1309			VM_OBJECT_UNLOCK(source);
1310			VM_OBJECT_UNLOCK(orig_object);
1311			VM_OBJECT_UNLOCK(new_object);
1312			vm_object_deallocate(new_object);
1313			VM_OBJECT_LOCK(orig_object);
1314			return;
1315		}
1316		LIST_INSERT_HEAD(&source->shadow_head,
1317				  new_object, shadow_list);
1318		source->shadow_count++;
1319		vm_object_reference_locked(source);	/* for new_object */
1320		vm_object_clear_flag(source, OBJ_ONEMAPPING);
1321		VM_OBJECT_UNLOCK(source);
1322		new_object->backing_object_offset =
1323			orig_object->backing_object_offset + entry->offset;
1324		new_object->backing_object = source;
1325	}
1326	if (orig_object->cred != NULL) {
1327		new_object->cred = orig_object->cred;
1328		crhold(orig_object->cred);
1329		new_object->charge = ptoa(size);
1330		KASSERT(orig_object->charge >= ptoa(size),
1331		    ("orig_object->charge < 0"));
1332		orig_object->charge -= ptoa(size);
1333	}
1334retry:
1335	m = vm_page_find_least(orig_object, offidxstart);
1336	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
1337	    m = m_next) {
1338		m_next = TAILQ_NEXT(m, listq);
1339
1340		/*
1341		 * We must wait for pending I/O to complete before we can
1342		 * rename the page.
1343		 *
1344		 * We do not have to VM_PROT_NONE the page as mappings should
1345		 * not be changed by this operation.
1346		 */
1347		if ((m->oflags & VPO_BUSY) || m->busy) {
1348			VM_OBJECT_UNLOCK(new_object);
1349			m->oflags |= VPO_WANTED;
1350			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
1351			VM_OBJECT_LOCK(new_object);
1352			goto retry;
1353		}
1354		vm_page_lock(m);
1355		vm_page_rename(m, new_object, idx);
1356		vm_page_unlock(m);
1357		/* page automatically made dirty by rename and cache handled */
1358		vm_page_busy(m);
1359	}
1360	if (orig_object->type == OBJT_SWAP) {
1361		/*
1362		 * swap_pager_copy() can sleep, in which case the orig_object's
1363		 * and new_object's locks are released and reacquired.
1364		 */
1365		swap_pager_copy(orig_object, new_object, offidxstart, 0);
1366
1367		/*
1368		 * Transfer any cached pages from orig_object to new_object.
1369		 */
1370		if (__predict_false(orig_object->cache != NULL))
1371			vm_page_cache_transfer(orig_object, offidxstart,
1372			    new_object);
1373	}
1374	VM_OBJECT_UNLOCK(orig_object);
1375	TAILQ_FOREACH(m, &new_object->memq, listq)
1376		vm_page_wakeup(m);
1377	VM_OBJECT_UNLOCK(new_object);
1378	entry->object.vm_object = new_object;
1379	entry->offset = 0LL;
1380	vm_object_deallocate(orig_object);
1381	VM_OBJECT_LOCK(new_object);
1382}
1383
1384#define	OBSC_TEST_ALL_SHADOWED	0x0001
1385#define	OBSC_COLLAPSE_NOWAIT	0x0002
1386#define	OBSC_COLLAPSE_WAIT	0x0004
1387
1388static int
1389vm_object_backing_scan(vm_object_t object, int op)
1390{
1391	int r = 1;
1392	vm_page_t p;
1393	vm_object_t backing_object;
1394	vm_pindex_t backing_offset_index;
1395
1396	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1397	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
1398
1399	backing_object = object->backing_object;
1400	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1401
1402	/*
1403	 * Initial conditions
1404	 */
1405	if (op & OBSC_TEST_ALL_SHADOWED) {
1406		/*
1407		 * We do not want to have to test for the existence of cache
1408		 * or swap pages in the backing object.  XXX but with the
1409		 * new swapper this would be pretty easy to do.
1410		 *
1411		 * XXX what about anonymous MAP_SHARED memory that hasn't
1412		 * been ZFOD faulted yet?  If we do not test for this, the
1413		 * shadow test may succeed! XXX
1414		 */
1415		if (backing_object->type != OBJT_DEFAULT) {
1416			return (0);
1417		}
1418	}
1419	if (op & OBSC_COLLAPSE_WAIT) {
1420		vm_object_set_flag(backing_object, OBJ_DEAD);
1421	}
1422
1423	/*
1424	 * Our scan
1425	 */
1426	p = TAILQ_FIRST(&backing_object->memq);
1427	while (p) {
1428		vm_page_t next = TAILQ_NEXT(p, listq);
1429		vm_pindex_t new_pindex = p->pindex - backing_offset_index;
1430
1431		if (op & OBSC_TEST_ALL_SHADOWED) {
1432			vm_page_t pp;
1433
1434			/*
1435			 * Ignore pages outside the parent object's range
1436			 * and outside the parent object's mapping of the
1437			 * backing object.
1438			 *
1439			 * note that we do not busy the backing object's
1440			 * page.
1441			 */
1442			if (
1443			    p->pindex < backing_offset_index ||
1444			    new_pindex >= object->size
1445			) {
1446				p = next;
1447				continue;
1448			}
1449
1450			/*
1451			 * See if the parent has the page or if the parent's
1452			 * object pager has the page.  If the parent has the
1453			 * page but the page is not valid, the parent's
1454			 * object pager must have the page.
1455			 *
1456			 * If this fails, the parent does not completely shadow
1457			 * the object and we might as well give up now.
1458			 */
1459
1460			pp = vm_page_lookup(object, new_pindex);
1461			if (
1462			    (pp == NULL || pp->valid == 0) &&
1463			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
1464			) {
1465				r = 0;
1466				break;
1467			}
1468		}
1469
1470		/*
1471		 * Check for busy page
1472		 */
1473		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1474			vm_page_t pp;
1475
1476			if (op & OBSC_COLLAPSE_NOWAIT) {
1477				if ((p->oflags & VPO_BUSY) ||
1478				    !p->valid ||
1479				    p->busy) {
1480					p = next;
1481					continue;
1482				}
1483			} else if (op & OBSC_COLLAPSE_WAIT) {
1484				if ((p->oflags & VPO_BUSY) || p->busy) {
1485					VM_OBJECT_UNLOCK(object);
1486					p->oflags |= VPO_WANTED;
1487					msleep(p, VM_OBJECT_MTX(backing_object),
1488					    PDROP | PVM, "vmocol", 0);
1489					VM_OBJECT_LOCK(object);
1490					VM_OBJECT_LOCK(backing_object);
1491					/*
1492					 * If we slept, anything could have
1493					 * happened.  Since the object is
1494					 * marked dead, the backing offset
1495					 * should not have changed so we
1496					 * just restart our scan.
1497					 */
1498					p = TAILQ_FIRST(&backing_object->memq);
1499					continue;
1500				}
1501			}
1502
1503			KASSERT(
1504			    p->object == backing_object,
1505			    ("vm_object_backing_scan: object mismatch")
1506			);
1507
1508			/*
1509			 * Destroy any associated swap
1510			 */
1511			if (backing_object->type == OBJT_SWAP) {
1512				swap_pager_freespace(
1513				    backing_object,
1514				    p->pindex,
1515				    1
1516				);
1517			}
1518
1519			if (
1520			    p->pindex < backing_offset_index ||
1521			    new_pindex >= object->size
1522			) {
1523				/*
1524				 * Page is out of the parent object's range, we
1525				 * can simply destroy it.
1526				 */
1527				vm_page_lock(p);
1528				KASSERT(!pmap_page_is_mapped(p),
1529				    ("freeing mapped page %p", p));
1530				if (p->wire_count == 0)
1531					vm_page_free(p);
1532				else
1533					vm_page_remove(p);
1534				vm_page_unlock(p);
1535				p = next;
1536				continue;
1537			}
1538
1539			pp = vm_page_lookup(object, new_pindex);
1540			if (
1541			    (op & OBSC_COLLAPSE_NOWAIT) != 0 &&
1542			    (pp != NULL && pp->valid == 0)
1543			) {
1544				/*
1545				 * The page in the parent is not (yet) valid.
1546				 * We don't know anything about the state of
1547				 * the original page.  It might be mapped,
1548				 * so we must avoid the next if here.
1549				 *
1550				 * This is due to a race in vm_fault() where
1551				 * we must unbusy the original (backing_obj)
1552				 * page before we can (re)lock the parent.
1553				 * Hence we can get here.
1554				 */
1555				p = next;
1556				continue;
1557			}
1558			if (
1559			    pp != NULL ||
1560			    vm_pager_has_page(object, new_pindex, NULL, NULL)
1561			) {
1562				/*
1563				 * page already exists in parent OR swap exists
1564				 * for this location in the parent.  Destroy
1565				 * the original page from the backing object.
1566				 *
1567				 * Leave the parent's page alone
1568				 */
1569				vm_page_lock(p);
1570				KASSERT(!pmap_page_is_mapped(p),
1571				    ("freeing mapped page %p", p));
1572				if (p->wire_count == 0)
1573					vm_page_free(p);
1574				else
1575					vm_page_remove(p);
1576				vm_page_unlock(p);
1577				p = next;
1578				continue;
1579			}
1580
1581#if VM_NRESERVLEVEL > 0
1582			/*
1583			 * Rename the reservation.
1584			 */
1585			vm_reserv_rename(p, object, backing_object,
1586			    backing_offset_index);
1587#endif
1588
1589			/*
1590			 * Page does not exist in parent, rename the
1591			 * page from the backing object to the main object.
1592			 *
1593			 * If the page was mapped to a process, it can remain
1594			 * mapped through the rename.
1595			 */
1596			vm_page_lock(p);
1597			vm_page_rename(p, object, new_pindex);
1598			vm_page_unlock(p);
1599			/* page automatically made dirty by rename */
1600		}
1601		p = next;
1602	}
1603	return (r);
1604}
1605
1606
1607/*
1608 * this version of collapse allows the operation to occur earlier and
1609 * when paging_in_progress is true for an object...  This is not a complete
1610 * operation, but should plug 99.9% of the rest of the leaks.
1611 */
1612static void
1613vm_object_qcollapse(vm_object_t object)
1614{
1615	vm_object_t backing_object = object->backing_object;
1616
1617	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1618	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
1619
1620	if (backing_object->ref_count != 1)
1621		return;
1622
1623	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1624}
1625
1626/*
1627 *	vm_object_collapse:
1628 *
1629 *	Collapse an object with the object backing it.
1630 *	Pages in the backing object are moved into the
1631 *	parent, and the backing object is deallocated.
1632 */
1633void
1634vm_object_collapse(vm_object_t object)
1635{
1636	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1637
1638	while (TRUE) {
1639		vm_object_t backing_object;
1640
1641		/*
1642		 * Verify that the conditions are right for collapse:
1643		 *
1644		 * The object exists and the backing object exists.
1645		 */
1646		if ((backing_object = object->backing_object) == NULL)
1647			break;
1648
1649		/*
1650		 * we check the backing object first, because it is most likely
1651		 * not collapsable.
1652		 */
1653		VM_OBJECT_LOCK(backing_object);
1654		if (backing_object->handle != NULL ||
1655		    (backing_object->type != OBJT_DEFAULT &&
1656		     backing_object->type != OBJT_SWAP) ||
1657		    (backing_object->flags & OBJ_DEAD) ||
1658		    object->handle != NULL ||
1659		    (object->type != OBJT_DEFAULT &&
1660		     object->type != OBJT_SWAP) ||
1661		    (object->flags & OBJ_DEAD)) {
1662			VM_OBJECT_UNLOCK(backing_object);
1663			break;
1664		}
1665
1666		if (
1667		    object->paging_in_progress != 0 ||
1668		    backing_object->paging_in_progress != 0
1669		) {
1670			vm_object_qcollapse(object);
1671			VM_OBJECT_UNLOCK(backing_object);
1672			break;
1673		}
1674		/*
1675		 * We know that we can either collapse the backing object (if
1676		 * the parent is the only reference to it) or (perhaps) have
1677		 * the parent bypass the object if the parent happens to shadow
1678		 * all the resident pages in the entire backing object.
1679		 *
1680		 * This is ignoring pager-backed pages such as swap pages.
1681		 * vm_object_backing_scan fails the shadowing test in this
1682		 * case.
1683		 */
1684		if (backing_object->ref_count == 1) {
1685			/*
1686			 * If there is exactly one reference to the backing
1687			 * object, we can collapse it into the parent.
1688			 */
1689			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1690
1691#if VM_NRESERVLEVEL > 0
1692			/*
1693			 * Break any reservations from backing_object.
1694			 */
1695			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
1696				vm_reserv_break_all(backing_object);
1697#endif
1698
1699			/*
1700			 * Move the pager from backing_object to object.
1701			 */
1702			if (backing_object->type == OBJT_SWAP) {
1703				/*
1704				 * swap_pager_copy() can sleep, in which case
1705				 * the backing_object's and object's locks are
1706				 * released and reacquired.
1707				 */
1708				swap_pager_copy(
1709				    backing_object,
1710				    object,
1711				    OFF_TO_IDX(object->backing_object_offset), TRUE);
1712
1713				/*
1714				 * Free any cached pages from backing_object.
1715				 */
1716				if (__predict_false(backing_object->cache != NULL))
1717					vm_page_cache_free(backing_object, 0, 0);
1718			}
1719			/*
1720			 * Object now shadows whatever backing_object did.
1721			 * Note that the reference to
1722			 * backing_object->backing_object moves from within
1723			 * backing_object to within object.
1724			 */
1725			LIST_REMOVE(object, shadow_list);
1726			backing_object->shadow_count--;
1727			if (backing_object->backing_object) {
1728				VM_OBJECT_LOCK(backing_object->backing_object);
1729				LIST_REMOVE(backing_object, shadow_list);
1730				LIST_INSERT_HEAD(
1731				    &backing_object->backing_object->shadow_head,
1732				    object, shadow_list);
1733				/*
1734				 * The shadow_count has not changed.
1735				 */
1736				VM_OBJECT_UNLOCK(backing_object->backing_object);
1737			}
1738			object->backing_object = backing_object->backing_object;
1739			object->backing_object_offset +=
1740			    backing_object->backing_object_offset;
1741
1742			/*
1743			 * Discard backing_object.
1744			 *
1745			 * Since the backing object has no pages, no pager left,
1746			 * and no object references within it, all that is
1747			 * necessary is to dispose of it.
1748			 */
1749			KASSERT(backing_object->ref_count == 1, (
1750"backing_object %p was somehow re-referenced during collapse!",
1751			    backing_object));
1752			VM_OBJECT_UNLOCK(backing_object);
1753			vm_object_destroy(backing_object);
1754
1755			object_collapses++;
1756		} else {
1757			vm_object_t new_backing_object;
1758
1759			/*
1760			 * If we do not entirely shadow the backing object,
1761			 * there is nothing we can do so we give up.
1762			 */
1763			if (object->resident_page_count != object->size &&
1764			    vm_object_backing_scan(object,
1765			    OBSC_TEST_ALL_SHADOWED) == 0) {
1766				VM_OBJECT_UNLOCK(backing_object);
1767				break;
1768			}
1769
1770			/*
1771			 * Make the parent shadow the next object in the
1772			 * chain.  Deallocating backing_object will not remove
1773			 * it, since its reference count is at least 2.
1774			 */
1775			LIST_REMOVE(object, shadow_list);
1776			backing_object->shadow_count--;
1777
1778			new_backing_object = backing_object->backing_object;
1779			if ((object->backing_object = new_backing_object) != NULL) {
1780				VM_OBJECT_LOCK(new_backing_object);
1781				LIST_INSERT_HEAD(
1782				    &new_backing_object->shadow_head,
1783				    object,
1784				    shadow_list
1785				);
1786				new_backing_object->shadow_count++;
1787				vm_object_reference_locked(new_backing_object);
1788				VM_OBJECT_UNLOCK(new_backing_object);
1789				object->backing_object_offset +=
1790					backing_object->backing_object_offset;
1791			}
1792
1793			/*
1794			 * Drop the reference count on backing_object. Since
1795			 * its ref_count was at least 2, it will not vanish.
1796			 */
1797			backing_object->ref_count--;
1798			VM_OBJECT_UNLOCK(backing_object);
1799			object_bypasses++;
1800		}
1801
1802		/*
1803		 * Try again with this object's new backing object.
1804		 */
1805	}
1806}
1807
1808/*
1809 *	vm_object_page_remove:
1810 *
1811 *	For the given object, either frees or invalidates each of the
1812 *	specified pages.  In general, a page is freed.  However, if a page is
1813 *	wired for any reason other than the existence of a managed, wired
1814 *	mapping, then it may be invalidated but not removed from the object.
1815 *	Pages are specified by the given range ["start", "end") and the option
1816 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
1817 *	extends from "start" to the end of the object.  If the option
1818 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1819 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
1820 *	specified, then the pages within the specified range must have no
1821 *	mappings.  Otherwise, if this option is not specified, any mappings to
1822 *	the specified pages are removed before the pages are freed or
1823 *	invalidated.
1824 *
1825 *	In general, this operation should only be performed on objects that
1826 *	contain managed pages.  There are, however, two exceptions.  First, it
1827 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
1828 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1829 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
1830 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
1831 *
1832 *	The object must be locked.
1833 */
1834void
1835vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1836    int options)
1837{
1838	vm_page_t p, next;
1839	int wirings;
1840
1841	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1842	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
1843	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
1844	    ("vm_object_page_remove: illegal options for object %p", object));
1845	if (object->resident_page_count == 0)
1846		goto skipmemq;
1847	vm_object_pip_add(object, 1);
1848again:
1849	p = vm_page_find_least(object, start);
1850
1851	/*
1852	 * Here, the variable "p" is either (1) the page with the least pindex
1853	 * greater than or equal to the parameter "start" or (2) NULL.
1854	 */
1855	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1856		next = TAILQ_NEXT(p, listq);
1857
1858		/*
1859		 * If the page is wired for any reason besides the existence
1860		 * of managed, wired mappings, then it cannot be freed.  For
1861		 * example, fictitious pages, which represent device memory,
1862		 * are inherently wired and cannot be freed.  They can,
1863		 * however, be invalidated if the option OBJPR_CLEANONLY is
1864		 * not specified.
1865		 */
1866		vm_page_lock(p);
1867		if ((wirings = p->wire_count) != 0 &&
1868		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
1869			if ((options & (OBJPR_NOTWIRED | OBJPR_NOTMAPPED)) ==
1870			    0) {
1871				pmap_remove_all(p);
1872				/* Account for removal of wired mappings. */
1873				if (wirings != 0)
1874					p->wire_count -= wirings;
1875			}
1876			if ((options & OBJPR_CLEANONLY) == 0) {
1877				p->valid = 0;
1878				vm_page_undirty(p);
1879			}
1880			goto next;
1881		}
1882		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1883			goto again;
1884		KASSERT((p->flags & PG_FICTITIOUS) == 0,
1885		    ("vm_object_page_remove: page %p is fictitious", p));
1886		if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) {
1887			if ((options & OBJPR_NOTMAPPED) == 0)
1888				pmap_remove_write(p);
1889			if (p->dirty)
1890				goto next;
1891		}
1892		if ((options & OBJPR_NOTMAPPED) == 0) {
1893			if ((options & OBJPR_NOTWIRED) != 0 && wirings != 0)
1894				goto next;
1895			pmap_remove_all(p);
1896			/* Account for removal of wired mappings. */
1897			if (wirings != 0) {
1898				KASSERT(p->wire_count == wirings,
1899				    ("inconsistent wire count %d %d %p",
1900				    p->wire_count, wirings, p));
1901				p->wire_count = 0;
1902				atomic_subtract_int(&cnt.v_wire_count, 1);
1903			}
1904		}
1905		vm_page_free(p);
1906next:
1907		vm_page_unlock(p);
1908	}
1909	vm_object_pip_wakeup(object);
1910skipmemq:
1911	if (__predict_false(object->cache != NULL))
1912		vm_page_cache_free(object, start, end);
1913}
1914
1915/*
1916 *	vm_object_page_cache:
1917 *
1918 *	For the given object, attempt to move the specified clean
1919 *	pages to the cache queue.  If a page is wired for any reason,
1920 *	then it will not be changed.  Pages are specified by the given
1921 *	range ["start", "end").  As a special case, if "end" is zero,
1922 *	then the range extends from "start" to the end of the object.
1923 *	Any mappings to the specified pages are removed before the
1924 *	pages are moved to the cache queue.
1925 *
1926 *	This operation should only be performed on objects that
1927 *	contain managed pages.
1928 *
1929 *	The object must be locked.
1930 */
1931void
1932vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1933{
1934	struct mtx *mtx, *new_mtx;
1935	vm_page_t p, next;
1936
1937	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1938	KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_SG &&
1939	    object->type != OBJT_PHYS),
1940	    ("vm_object_page_cache: illegal object %p", object));
1941	if (object->resident_page_count == 0)
1942		return;
1943	p = vm_page_find_least(object, start);
1944
1945	/*
1946	 * Here, the variable "p" is either (1) the page with the least pindex
1947	 * greater than or equal to the parameter "start" or (2) NULL.
1948	 */
1949	mtx = NULL;
1950	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
1951		next = TAILQ_NEXT(p, listq);
1952
1953		/*
1954		 * Avoid releasing and reacquiring the same page lock.
1955		 */
1956		new_mtx = vm_page_lockptr(p);
1957		if (mtx != new_mtx) {
1958			if (mtx != NULL)
1959				mtx_unlock(mtx);
1960			mtx = new_mtx;
1961			mtx_lock(mtx);
1962		}
1963		vm_page_try_to_cache(p);
1964	}
1965	if (mtx != NULL)
1966		mtx_unlock(mtx);
1967}
1968
1969/*
1970 *	Populate the specified range of the object with valid pages.  Returns
1971 *	TRUE if the range is successfully populated and FALSE otherwise.
1972 *
1973 *	Note: This function should be optimized to pass a larger array of
1974 *	pages to vm_pager_get_pages() before it is applied to a non-
1975 *	OBJT_DEVICE object.
1976 *
1977 *	The object must be locked.
1978 */
1979boolean_t
1980vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1981{
1982	vm_page_t m, ma[1];
1983	vm_pindex_t pindex;
1984	int rv;
1985
1986	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1987	for (pindex = start; pindex < end; pindex++) {
1988		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
1989		    VM_ALLOC_RETRY);
1990		if (m->valid != VM_PAGE_BITS_ALL) {
1991			ma[0] = m;
1992			rv = vm_pager_get_pages(object, ma, 1, 0);
1993			m = vm_page_lookup(object, pindex);
1994			if (m == NULL)
1995				break;
1996			if (rv != VM_PAGER_OK) {
1997				vm_page_lock(m);
1998				vm_page_free(m);
1999				vm_page_unlock(m);
2000				break;
2001			}
2002		}
2003		/*
2004		 * Keep "m" busy because a subsequent iteration may unlock
2005		 * the object.
2006		 */
2007	}
2008	if (pindex > start) {
2009		m = vm_page_lookup(object, start);
2010		while (m != NULL && m->pindex < pindex) {
2011			vm_page_wakeup(m);
2012			m = TAILQ_NEXT(m, listq);
2013		}
2014	}
2015	return (pindex == end);
2016}
2017
2018/*
2019 *	Routine:	vm_object_coalesce
2020 *	Function:	Coalesces two objects backing up adjoining
2021 *			regions of memory into a single object.
2022 *
2023 *	returns TRUE if objects were combined.
2024 *
2025 *	NOTE:	Only works at the moment if the second object is NULL -
2026 *		if it's not, which object do we lock first?
2027 *
2028 *	Parameters:
2029 *		prev_object	First object to coalesce
2030 *		prev_offset	Offset into prev_object
2031 *		prev_size	Size of reference to prev_object
2032 *		next_size	Size of reference to the second object
2033 *		reserved	Indicator that extension region has
2034 *				swap accounted for
2035 *
2036 *	Conditions:
2037 *	The object must *not* be locked.
2038 */
2039boolean_t
2040vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2041    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2042{
2043	vm_pindex_t next_pindex;
2044
2045	if (prev_object == NULL)
2046		return (TRUE);
2047	VM_OBJECT_LOCK(prev_object);
2048	if (prev_object->type != OBJT_DEFAULT &&
2049	    prev_object->type != OBJT_SWAP) {
2050		VM_OBJECT_UNLOCK(prev_object);
2051		return (FALSE);
2052	}
2053
2054	/*
2055	 * Try to collapse the object first
2056	 */
2057	vm_object_collapse(prev_object);
2058
2059	/*
2060	 * Can't coalesce if: . more than one reference . paged out . shadows
2061	 * another object . has a copy elsewhere (any of which mean that the
2062	 * pages not mapped to prev_entry may be in use anyway)
2063	 */
2064	if (prev_object->backing_object != NULL) {
2065		VM_OBJECT_UNLOCK(prev_object);
2066		return (FALSE);
2067	}
2068
2069	prev_size >>= PAGE_SHIFT;
2070	next_size >>= PAGE_SHIFT;
2071	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2072
2073	if ((prev_object->ref_count > 1) &&
2074	    (prev_object->size != next_pindex)) {
2075		VM_OBJECT_UNLOCK(prev_object);
2076		return (FALSE);
2077	}
2078
2079	/*
2080	 * Account for the charge.
2081	 */
2082	if (prev_object->cred != NULL) {
2083
2084		/*
2085		 * If prev_object was charged, then this mapping,
2086		 * althought not charged now, may become writable
2087		 * later. Non-NULL cred in the object would prevent
2088		 * swap reservation during enabling of the write
2089		 * access, so reserve swap now. Failed reservation
2090		 * cause allocation of the separate object for the map
2091		 * entry, and swap reservation for this entry is
2092		 * managed in appropriate time.
2093		 */
2094		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2095		    prev_object->cred)) {
2096			return (FALSE);
2097		}
2098		prev_object->charge += ptoa(next_size);
2099	}
2100
2101	/*
2102	 * Remove any pages that may still be in the object from a previous
2103	 * deallocation.
2104	 */
2105	if (next_pindex < prev_object->size) {
2106		vm_object_page_remove(prev_object, next_pindex, next_pindex +
2107		    next_size, 0);
2108		if (prev_object->type == OBJT_SWAP)
2109			swap_pager_freespace(prev_object,
2110					     next_pindex, next_size);
2111#if 0
2112		if (prev_object->cred != NULL) {
2113			KASSERT(prev_object->charge >=
2114			    ptoa(prev_object->size - next_pindex),
2115			    ("object %p overcharged 1 %jx %jx", prev_object,
2116				(uintmax_t)next_pindex, (uintmax_t)next_size));
2117			prev_object->charge -= ptoa(prev_object->size -
2118			    next_pindex);
2119		}
2120#endif
2121	}
2122
2123	/*
2124	 * Extend the object if necessary.
2125	 */
2126	if (next_pindex + next_size > prev_object->size)
2127		prev_object->size = next_pindex + next_size;
2128
2129	VM_OBJECT_UNLOCK(prev_object);
2130	return (TRUE);
2131}
2132
2133void
2134vm_object_set_writeable_dirty(vm_object_t object)
2135{
2136
2137	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2138	if (object->type != OBJT_VNODE)
2139		return;
2140	object->generation++;
2141	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2142		return;
2143	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2144}
2145
2146#include "opt_ddb.h"
2147#ifdef DDB
2148#include <sys/kernel.h>
2149
2150#include <sys/cons.h>
2151
2152#include <ddb/ddb.h>
2153
2154static int
2155_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2156{
2157	vm_map_t tmpm;
2158	vm_map_entry_t tmpe;
2159	vm_object_t obj;
2160	int entcount;
2161
2162	if (map == 0)
2163		return 0;
2164
2165	if (entry == 0) {
2166		tmpe = map->header.next;
2167		entcount = map->nentries;
2168		while (entcount-- && (tmpe != &map->header)) {
2169			if (_vm_object_in_map(map, object, tmpe)) {
2170				return 1;
2171			}
2172			tmpe = tmpe->next;
2173		}
2174	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2175		tmpm = entry->object.sub_map;
2176		tmpe = tmpm->header.next;
2177		entcount = tmpm->nentries;
2178		while (entcount-- && tmpe != &tmpm->header) {
2179			if (_vm_object_in_map(tmpm, object, tmpe)) {
2180				return 1;
2181			}
2182			tmpe = tmpe->next;
2183		}
2184	} else if ((obj = entry->object.vm_object) != NULL) {
2185		for (; obj; obj = obj->backing_object)
2186			if (obj == object) {
2187				return 1;
2188			}
2189	}
2190	return 0;
2191}
2192
2193static int
2194vm_object_in_map(vm_object_t object)
2195{
2196	struct proc *p;
2197
2198	/* sx_slock(&allproc_lock); */
2199	FOREACH_PROC_IN_SYSTEM(p) {
2200		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2201			continue;
2202		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
2203			/* sx_sunlock(&allproc_lock); */
2204			return 1;
2205		}
2206	}
2207	/* sx_sunlock(&allproc_lock); */
2208	if (_vm_object_in_map(kernel_map, object, 0))
2209		return 1;
2210	if (_vm_object_in_map(kmem_map, object, 0))
2211		return 1;
2212	if (_vm_object_in_map(pager_map, object, 0))
2213		return 1;
2214	if (_vm_object_in_map(buffer_map, object, 0))
2215		return 1;
2216	return 0;
2217}
2218
2219DB_SHOW_COMMAND(vmochk, vm_object_check)
2220{
2221	vm_object_t object;
2222
2223	/*
2224	 * make sure that internal objs are in a map somewhere
2225	 * and none have zero ref counts.
2226	 */
2227	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2228		if (object->handle == NULL &&
2229		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2230			if (object->ref_count == 0) {
2231				db_printf("vmochk: internal obj has zero ref count: %ld\n",
2232					(long)object->size);
2233			}
2234			if (!vm_object_in_map(object)) {
2235				db_printf(
2236			"vmochk: internal obj is not in a map: "
2237			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2238				    object->ref_count, (u_long)object->size,
2239				    (u_long)object->size,
2240				    (void *)object->backing_object);
2241			}
2242		}
2243	}
2244}
2245
2246/*
2247 *	vm_object_print:	[ debug ]
2248 */
2249DB_SHOW_COMMAND(object, vm_object_print_static)
2250{
2251	/* XXX convert args. */
2252	vm_object_t object = (vm_object_t)addr;
2253	boolean_t full = have_addr;
2254
2255	vm_page_t p;
2256
2257	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2258#define	count	was_count
2259
2260	int count;
2261
2262	if (object == NULL)
2263		return;
2264
2265	db_iprintf(
2266	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
2267	    object, (int)object->type, (uintmax_t)object->size,
2268	    object->resident_page_count, object->ref_count, object->flags,
2269	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
2270	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2271	    object->shadow_count,
2272	    object->backing_object ? object->backing_object->ref_count : 0,
2273	    object->backing_object, (uintmax_t)object->backing_object_offset);
2274
2275	if (!full)
2276		return;
2277
2278	db_indent += 2;
2279	count = 0;
2280	TAILQ_FOREACH(p, &object->memq, listq) {
2281		if (count == 0)
2282			db_iprintf("memory:=");
2283		else if (count == 6) {
2284			db_printf("\n");
2285			db_iprintf(" ...");
2286			count = 0;
2287		} else
2288			db_printf(",");
2289		count++;
2290
2291		db_printf("(off=0x%jx,page=0x%jx)",
2292		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2293	}
2294	if (count != 0)
2295		db_printf("\n");
2296	db_indent -= 2;
2297}
2298
2299/* XXX. */
2300#undef count
2301
2302/* XXX need this non-static entry for calling from vm_map_print. */
2303void
2304vm_object_print(
2305        /* db_expr_t */ long addr,
2306	boolean_t have_addr,
2307	/* db_expr_t */ long count,
2308	char *modif)
2309{
2310	vm_object_print_static(addr, have_addr, count, modif);
2311}
2312
2313DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2314{
2315	vm_object_t object;
2316	vm_pindex_t fidx;
2317	vm_paddr_t pa;
2318	vm_page_t m, prev_m;
2319	int rcount, nl, c;
2320
2321	nl = 0;
2322	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2323		db_printf("new object: %p\n", (void *)object);
2324		if (nl > 18) {
2325			c = cngetc();
2326			if (c != ' ')
2327				return;
2328			nl = 0;
2329		}
2330		nl++;
2331		rcount = 0;
2332		fidx = 0;
2333		pa = -1;
2334		TAILQ_FOREACH(m, &object->memq, listq) {
2335			if (m->pindex > 128)
2336				break;
2337			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2338			    prev_m->pindex + 1 != m->pindex) {
2339				if (rcount) {
2340					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2341						(long)fidx, rcount, (long)pa);
2342					if (nl > 18) {
2343						c = cngetc();
2344						if (c != ' ')
2345							return;
2346						nl = 0;
2347					}
2348					nl++;
2349					rcount = 0;
2350				}
2351			}
2352			if (rcount &&
2353				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2354				++rcount;
2355				continue;
2356			}
2357			if (rcount) {
2358				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2359					(long)fidx, rcount, (long)pa);
2360				if (nl > 18) {
2361					c = cngetc();
2362					if (c != ' ')
2363						return;
2364					nl = 0;
2365				}
2366				nl++;
2367			}
2368			fidx = m->pindex;
2369			pa = VM_PAGE_TO_PHYS(m);
2370			rcount = 1;
2371		}
2372		if (rcount) {
2373			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2374				(long)fidx, rcount, (long)pa);
2375			if (nl > 18) {
2376				c = cngetc();
2377				if (c != ' ')
2378					return;
2379				nl = 0;
2380			}
2381			nl++;
2382		}
2383	}
2384}
2385#endif /* DDB */
2386