vm_object.c revision 195649
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_object.c 195649 2009-07-12 23:31:20Z alc $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR       8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0,
    "Enable sequential iteration optimization");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
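
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the reference-counting lifecycle that the comment above describes, for a
 * hypothetical anonymous object with no other users:
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, size);	// ref_count == 1
 *	vm_object_reference(obj);			// ref_count == 2
 *	...
 *	vm_object_deallocate(obj);			// ref_count == 1
 *	vm_object_deallocate(obj);			// last reference gone;
 *							// storage may be freed
 */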

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->flags = 0;
	object->uip = NULL;
	object->charge = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_PHYS:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}
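
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical caller setting a memory attribute on a freshly created
 * object, before any page is allocated to it, per the rule above.  This
 * assumes a platform that defines VM_MEMATTR_UNCACHEABLE:
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, size);
 *	VM_OBJECT_LOCK(obj);
 *	if (vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE) !=
 *	    KERN_SUCCESS)
 *		...;	// object already has resident pages or is dead
 *	VM_OBJECT_UNLOCK(obj);
 */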

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}
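
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the paging_in_progress protocol implemented by the routines above.  A
 * hypothetical pager-like caller brackets its I/O this way so that
 * vm_object_pip_wait() sleepers are woken when the count drops to zero:
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_pip_add(obj, 1);
 *	VM_OBJECT_UNLOCK(obj);
 *	...				// perform the paging I/O
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_pip_wakeup(obj);	// drops count, wakes waiters at zero
 *	VM_OBJECT_UNLOCK(obj);
 */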

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
			("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Release the allocation charge.
	 */
	if (object->uip != NULL) {
		KASSERT(object->type == OBJT_DEFAULT ||
		    object->type == OBJT_SWAP,
		    ("vm_object_destroy: non-swap obj %p has uip",
		     object));
		swap_release_by_uid(object->charge, object->uip);
		object->charge = 0;
		uifree(object->uip);
		object->uip = NULL;
	}
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
		("vm_object_terminate: object with references, ref_count=%d",
		object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
			("vm_object_terminate: freeing busy page %p "
			"p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags));
		if (p->wire_count == 0) {
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	vm_object_destroy(object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
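
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the "start == end" special case above means a full-object clean can be
 * requested with a zero range, exactly as vm_object_terminate() does:
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);	// clean every page
 *	VM_OBJECT_UNLOCK(obj);
 */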
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if (p->dirty == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		p->oflags |= VPO_CLEANCHK;
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
			clearobjflags = 0;
		else
			pmap_remove_write(p);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if ((p->oflags & VPO_CLEANCHK) == 0 ||
			(pi < tstart) || (pi >= tend) ||
		    p->valid == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if (p->dirty == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
			curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->oflags & VPO_BUSY) ||
				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				 (tp->oflags & VPO_CLEANCHK) == 0) ||
				(tp->busy != 0))
				break;
			vm_page_test_dirty(tp);
			if (tp->dirty == 0) {
				tp->oflags &= ~VPO_CLEANCHK;
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->oflags & VPO_BUSY) ||
					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
					 (tp->oflags & VPO_CLEANCHK) == 0) ||
					(tp->busy != 0))
					break;
				vm_page_test_dirty(tp);
				if (tp->dirty == 0) {
					tp->oflags &= ~VPO_CLEANCHK;
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	p->oflags &= ~VPO_CLEANCHK;
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->dirty) {
			pmap_remove_write(ma[i]);
			ma[i]->oflags |= VPO_CLEANCHK;

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
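
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * roughly how an msync(2)-style caller would drive this routine for one map
 * entry; the exact mapping of the MS_* flags lives in the vm_map code, so
 * the flag expressions below are assumptions, not definitive:
 *
 *	vm_object_sync(entry->object.vm_object,
 *	    entry->offset + (start - entry->start),	// offset into object
 *	    end - start,				// size in bytes
 *	    syncio,					// e.g. !MS_ASYNC
 *	    invalidate);				// e.g. MS_INVALIDATE
 */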
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		boolean_t purge;
		purge = old_msync || (object->type == OBJT_DEVICE);
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    purge ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
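
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical caller applying MADV_FREE to a single page of an anonymous
 * object.  The object is passed unlocked (the routine locks it itself), and
 * "count" is in pages, not bytes:
 *
 *	vm_object_madvise(obj, pindex, 1, MADV_FREE);
 */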
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			goto unlock_tobject;
		}
		if ((m->oflags & VPO_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_REFERENCED);
			vm_page_unlock_queues();
			if (object != tobject)
				VM_OBJECT_UNLOCK(object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0);
			VM_OBJECT_LOCK(object);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do try to make the page as freeable as
			 * we can without actually taking the step of
			 * unmapping it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		if (tobject != object)
			VM_OBJECT_UNLOCK(tobject);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
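
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the in/out convention, as a hypothetical copy-on-write caller would use
 * it.  On return the variables name the shadow object and the (possibly
 * reset) offset into it:
 *
 *	vm_object_t obj = source_object;	// caller's one reference
 *	vm_ooffset_t off = source_offset;
 *	vm_object_shadow(&obj, &off, length);
 *	// obj now names the new (or unchanged) object; off is updated
 */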
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
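
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical caller splitting a map entry's pages into a fresh object.
 * Per the code below, the entry's object must be locked on entry; on return
 * entry->object.vm_object names the new object (still locked) and
 * entry->offset has been reset to 0:
 *
 *	VM_OBJECT_LOCK(entry->object.vm_object);
 *	vm_object_split(entry);
 *	VM_OBJECT_UNLOCK(entry->object.vm_object);
 */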
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_UNLOCK(source);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_LOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
				  new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
			orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->uip != NULL) {
		new_object->uip = orig_object->uip;
		uihold(orig_object->uip);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < ptoa(size)"));
		orig_object->charge -= ptoa(size);
	}
retry:
	if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
		if (m->pindex < offidxstart) {
			m = vm_page_splay(offidxstart, orig_object->root);
			if ((orig_object->root = m)->pindex < offidxstart)
				m = TAILQ_NEXT(m, listq);
		}
	}
	vm_page_lock_queues();
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if ((m->oflags & VPO_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_REFERENCED);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(new_object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			goto retry;
		}
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}
	vm_page_unlock_queues();
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);

		/*
		 * Transfer any cached pages from orig_object to new_object.
		 */
		if (__predict_false(orig_object->cache != NULL))
			vm_page_cache_transfer(orig_object, offidxstart,
			    new_object);
	}
	VM_OBJECT_UNLOCK(orig_object);
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of cache
		 * or swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->oflags & VPO_BUSY) ||
				    !p->valid ||
				    p->busy) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->oflags & VPO_BUSY) || p->busy) {
					vm_page_lock_queues();
					vm_page_flag_set(p, PG_REFERENCED);
					vm_page_unlock_queues();
					VM_OBJECT_UNLOCK(object);
					p->oflags |= VPO_WANTED;
					msleep(p, VM_OBJECT_MTX(backing_object),
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_backing_scan: object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

#if VM_NRESERVLEVEL > 0
			/*
			 * Rename the reservation.
			 */
			vm_reserv_rename(p, object, backing_object,
			    backing_offset_index);
#endif

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	return (r);
}

/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
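
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the shape of a shadow chain before and after a successful collapse,
 * assuming the backing object has ref_count == 1:
 *
 *	before:	object -> backing_object -> grandparent
 *	after:	object -> grandparent	(backing_object destroyed, its
 *					 pages and swap moved up)
 *
 * The caller holds the object's lock, e.g.:
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_collapse(obj);
 *	VM_OBJECT_UNLOCK(obj);
 */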
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * We check the backing object first because it is most
		 * likely not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);

				/*
				 * Free any cached pages from backing_object.
				 */
				if (__predict_false(backing_object->cache != NULL))
					vm_page_cache_free(backing_object, 0, 0);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);
			vm_object_destroy(backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    vm_object_backing_scan(object,
			    OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a
 *	page is wired for any reason other than the existence of a
 *	managed, wired mapping, then it may be invalidated but not
 *	removed from the object.  Pages are specified by the given
 *	range ["start", "end") and Boolean "clean_only".  As a
 *	special case, if "end" is zero, then the range extends from
 *	"start" to the end of the object.  If "clean_only" is TRUE,
 *	then only the non-dirty pages within the specified range are
 *	affected.
 *
 *	In general, this operation should only be performed on objects
 *	that contain managed pages.  There are two exceptions.  First,
 *	it may be performed on the kernel and kmem objects.  Second,
 *	it may be used by msync(..., MS_INVALIDATE) to invalidate
 *	device-backed pages.
 *
 *	The object must be locked.
 */
1899void
1900vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1901    boolean_t clean_only)
1902{
1903	vm_page_t p, next;
1904	int wirings;
1905
1906	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1907	if (object->resident_page_count == 0)
1908		goto skipmemq;
1909
1910	/*
1911	 * Since physically-backed objects do not use managed pages, we can't
1912	 * remove pages from the object (we must instead remove the page
1913	 * references, and then destroy the object).
1914	 */
1915	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
1916	    object == kmem_object,
1917	    ("attempt to remove pages from a physical object"));
1918
1919	vm_object_pip_add(object, 1);
1920again:
1921	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1922		if (p->pindex < start) {
1923			p = vm_page_splay(start, object->root);
1924			if ((object->root = p)->pindex < start)
1925				p = TAILQ_NEXT(p, listq);
1926		}
1927	}
1928	vm_page_lock_queues();
1929	/*
1930	 * Assert: the variable p is either (1) the page with the
1931	 * least pindex greater than or equal to the parameter "start"
1932	 * or (2) NULL.
1933	 */
1934	for (;
1935	     p != NULL && (p->pindex < end || end == 0);
1936	     p = next) {
1937		next = TAILQ_NEXT(p, listq);
1938
1939		/*
1940		 * If the page is wired for any reason besides the
1941		 * existence of managed, wired mappings, then it cannot
1942		 * be freed.  For example, fictitious pages, which
1943		 * represent device memory, are inherently wired and
1944		 * cannot be freed.  They can, however, be invalidated
1945		 * if "clean_only" is FALSE.
1946		 */
1947		if ((wirings = p->wire_count) != 0 &&
1948		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
1949			/* Fictitious pages do not have managed mappings. */
1950			if ((p->flags & PG_FICTITIOUS) == 0)
1951				pmap_remove_all(p);
1952			/* Account for removal of managed, wired mappings. */
1953			p->wire_count -= wirings;
1954			if (!clean_only) {
1955				p->valid = 0;
1956				vm_page_undirty(p);
1957			}
1958			continue;
1959		}
1960		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1961			goto again;
1962		KASSERT((p->flags & PG_FICTITIOUS) == 0,
1963		    ("vm_object_page_remove: page %p is fictitious", p));
1964		if (clean_only && p->valid) {
1965			pmap_remove_write(p);
1966			if (p->dirty)
1967				continue;
1968		}
1969		pmap_remove_all(p);
1970		/* Account for removal of managed, wired mappings. */
1971		if (wirings != 0)
1972			p->wire_count -= wirings;
1973		vm_page_free(p);
1974	}
1975	vm_page_unlock_queues();
1976	vm_object_pip_wakeup(object);
1977skipmemq:
1978	if (__predict_false(object->cache != NULL))
1979		vm_page_cache_free(object, start, end);
1980}
1981
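/*
 * Usage sketch (editorial addition; "obj" is a hypothetical local, not
 * a name from this file): remove every resident page of an object,
 * using the "end == 0" convention documented above.  Wired pages are
 * invalidated rather than freed, as described in the header comment.
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_page_remove(obj, 0, 0, FALSE);
 *	VM_OBJECT_UNLOCK(obj);
 *
 * Passing TRUE for "clean_only" would instead write-protect each valid
 * page and skip any page that is found to be dirty.
 */
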
1982/*
1983 *	Populate the specified range of the object with valid pages.  Returns
1984 *	TRUE if the range is successfully populated and FALSE otherwise.
1985 *
1986 *	Note: This function should be optimized to pass a larger array of
1987 *	pages to vm_pager_get_pages() before it is applied to a non-
1988 *	OBJT_DEVICE object.
1989 *
1990 *	The object must be locked.
1991 */
1992boolean_t
1993vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1994{
1995	vm_page_t m, ma[1];
1996	vm_pindex_t pindex;
1997	int rv;
1998
1999	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2000	for (pindex = start; pindex < end; pindex++) {
2001		m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
2002		    VM_ALLOC_RETRY);
2003		if (m->valid != VM_PAGE_BITS_ALL) {
2004			ma[0] = m;
2005			rv = vm_pager_get_pages(object, ma, 1, 0);
2006			m = vm_page_lookup(object, pindex);
2007			if (m == NULL)
2008				break;
2009			if (rv != VM_PAGER_OK) {
2010				vm_page_lock_queues();
2011				vm_page_free(m);
2012				vm_page_unlock_queues();
2013				break;
2014			}
2015		}
2016		/*
2017		 * Keep "m" busy because a subsequent iteration may unlock
2018		 * the object.
2019		 */
2020	}
2021	if (pindex > start) {
2022		m = vm_page_lookup(object, start);
2023		while (m != NULL && m->pindex < pindex) {
2024			vm_page_wakeup(m);
2025			m = TAILQ_NEXT(m, listq);
2026		}
2027	}
2028	return (pindex == end);
2029}
2030
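/*
 * Usage sketch (editorial addition; "obj", "npages", and "error" are
 * hypothetical names): fault in the first npages pages of an object so
 * that subsequent accesses cannot fail.
 *
 *	VM_OBJECT_LOCK(obj);
 *	if (!vm_object_populate(obj, 0, npages))
 *		error = EIO;
 *	VM_OBJECT_UNLOCK(obj);
 *
 * Each page is kept busy while the loop above runs, since fetching a
 * later page may temporarily unlock the object; the trailing loop
 * unbusies whatever range was actually populated before returning.
 */
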
2031/*
2032 *	Routine:	vm_object_coalesce
2033 *	Function:	Coalesces two objects backing up adjoining
2034 *			regions of memory into a single object.
2035 *
2036 *	returns TRUE if objects were combined.
2037 *
2038 *	NOTE:	Only works at the moment if the second object is NULL -
2039 *		if it's not, which object do we lock first?
2040 *
2041 *	Parameters:
2042 *		prev_object	First object to coalesce
2043 *		prev_offset	Offset into prev_object
2044 *		prev_size	Size of reference to prev_object
2045 *		next_size	Size of reference to the second object
2046 *		reserved	Indicator that extension region has
2047 *				swap accounted for
2048 *
2049 *	Conditions:
2050 *	The object must *not* be locked.
2051 */
2052boolean_t
2053vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2054    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2055{
2056	vm_pindex_t next_pindex;
2057
2058	if (prev_object == NULL)
2059		return (TRUE);
2060	VM_OBJECT_LOCK(prev_object);
2061	if (prev_object->type != OBJT_DEFAULT &&
2062	    prev_object->type != OBJT_SWAP) {
2063		VM_OBJECT_UNLOCK(prev_object);
2064		return (FALSE);
2065	}
2066
2067	/*
2068	 * Try to collapse the object first
2069	 */
2070	vm_object_collapse(prev_object);
2071
2072	/*
2073	 * Can't coalesce if: more than one reference, paged out, shadows
2074	 * another object, or has a copy elsewhere (any of which mean that
2075	 * the pages not mapped to prev_entry may be in use anyway).
2076	 */
2077	if (prev_object->backing_object != NULL) {
2078		VM_OBJECT_UNLOCK(prev_object);
2079		return (FALSE);
2080	}
2081
2082	prev_size >>= PAGE_SHIFT;
2083	next_size >>= PAGE_SHIFT;
2084	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2085
2086	if ((prev_object->ref_count > 1) &&
2087	    (prev_object->size != next_pindex)) {
2088		VM_OBJECT_UNLOCK(prev_object);
2089		return (FALSE);
2090	}
2091
2092	/*
2093	 * Account for the charge.
2094	 */
2095	if (prev_object->uip != NULL) {
2096
2097		/*
2098		 * If prev_object was charged, then this mapping, although
2099		 * not charged now, may become writable later.  A non-NULL
2100		 * uip in the object would prevent swap reservation during
2101		 * the enabling of write access, so reserve swap now.  A
2102		 * failed reservation causes allocation of a separate object
2103		 * for the map entry, and swap reservation for that entry is
2104		 * managed at the appropriate time.
2105		 */
2106		if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
2107		    prev_object->uip)) {
2108			VM_OBJECT_UNLOCK(prev_object);
2109			return (FALSE);
2110		}
2111		prev_object->charge += ptoa(next_size);
2112	}
2113
2114	/*
2115	 * Remove any pages that may still be in the object from a previous
2116	 * deallocation.
2117	 */
2118	if (next_pindex < prev_object->size) {
2119		vm_object_page_remove(prev_object,
2120				      next_pindex,
2121				      next_pindex + next_size, FALSE);
2122		if (prev_object->type == OBJT_SWAP)
2123			swap_pager_freespace(prev_object,
2124					     next_pindex, next_size);
2125#if 0
2126		if (prev_object->uip != NULL) {
2127			KASSERT(prev_object->charge >=
2128			    ptoa(prev_object->size - next_pindex),
2129			    ("object %p overcharged 1 %jx %jx", prev_object,
2130				(uintmax_t)next_pindex, (uintmax_t)next_size));
2131			prev_object->charge -= ptoa(prev_object->size -
2132			    next_pindex);
2133		}
2134#endif
2135	}
2136
2137	/*
2138	 * Extend the object if necessary.
2139	 */
2140	if (next_pindex + next_size > prev_object->size)
2141		prev_object->size = next_pindex + next_size;
2142
2143	VM_OBJECT_UNLOCK(prev_object);
2144	return (TRUE);
2145}
2146
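/*
 * Worked example (editorial addition; the numbers are hypothetical):
 * with 4K pages, a map entry backed by prev_object at prev_offset
 * 0x2000 with prev_size 0x3000 covers pindexes 2 through 4, so
 * next_pindex = OFF_TO_IDX(0x2000) + (0x3000 >> PAGE_SHIFT) = 5.
 * Growing it by next_size 0x1000 (one page) succeeds when the object
 * is OBJT_DEFAULT or OBJT_SWAP, has no backing object, and either has
 * a single reference or ends exactly at pindex 5; the object's size
 * is then extended to 6 pages.
 */
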
2147void
2148vm_object_set_writeable_dirty(vm_object_t object)
2149{
2150	struct vnode *vp;
2151
2152	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2153	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
2154		return;
2155	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
2156	if (object->type == OBJT_VNODE &&
2157	    (vp = (struct vnode *)object->handle) != NULL) {
2158		VI_LOCK(vp);
2159		vp->v_iflag |= VI_OBJDIRTY;
2160		VI_UNLOCK(vp);
2161	}
2162}
2163
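/*
 * Usage sketch (editorial addition; "obj" is hypothetical): code about
 * to create writable mappings of a vnode-backed object marks it first,
 * so that the syncer knows the vnode may have dirty pages:
 *
 *	VM_OBJECT_LOCK(obj);
 *	vm_object_set_writeable_dirty(obj);
 *	VM_OBJECT_UNLOCK(obj);
 *
 * The call is idempotent: once OBJ_MIGHTBEDIRTY is set, it returns
 * without touching the vnode again.
 */
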
2164#include "opt_ddb.h"
2165#ifdef DDB
2166#include <sys/kernel.h>
2167
2168#include <sys/cons.h>
2169
2170#include <ddb/ddb.h>
2171
2172static int
2173_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2174{
2175	vm_map_t tmpm;
2176	vm_map_entry_t tmpe;
2177	vm_object_t obj;
2178	int entcount;
2179
2180	if (map == NULL)
2181		return 0;
2182
2183	if (entry == NULL) {
2184		tmpe = map->header.next;
2185		entcount = map->nentries;
2186		while (entcount-- && (tmpe != &map->header)) {
2187			if (_vm_object_in_map(map, object, tmpe)) {
2188				return 1;
2189			}
2190			tmpe = tmpe->next;
2191		}
2192	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
2193		tmpm = entry->object.sub_map;
2194		tmpe = tmpm->header.next;
2195		entcount = tmpm->nentries;
2196		while (entcount-- && tmpe != &tmpm->header) {
2197			if (_vm_object_in_map(tmpm, object, tmpe)) {
2198				return 1;
2199			}
2200			tmpe = tmpe->next;
2201		}
2202	} else if ((obj = entry->object.vm_object) != NULL) {
2203		for (; obj; obj = obj->backing_object)
2204			if (obj == object) {
2205				return 1;
2206			}
2207	}
2208	return 0;
2209}
2210
2211static int
2212vm_object_in_map(vm_object_t object)
2213{
2214	struct proc *p;
2215
2216	/* sx_slock(&allproc_lock); */
2217	FOREACH_PROC_IN_SYSTEM(p) {
2218		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
2219			continue;
2220		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
2221			/* sx_sunlock(&allproc_lock); */
2222			return 1;
2223		}
2224	}
2225	/* sx_sunlock(&allproc_lock); */
2226	if (_vm_object_in_map(kernel_map, object, NULL))
2227		return 1;
2228	if (_vm_object_in_map(kmem_map, object, NULL))
2229		return 1;
2230	if (_vm_object_in_map(pager_map, object, NULL))
2231		return 1;
2232	if (_vm_object_in_map(buffer_map, object, NULL))
2233		return 1;
2234	return 0;
2235}
2236
2237DB_SHOW_COMMAND(vmochk, vm_object_check)
2238{
2239	vm_object_t object;
2240
2241	/*
2242	 * Make sure that internal objects are in a map somewhere
2243	 * and that none have zero reference counts.
2244	 */
2245	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2246		if (object->handle == NULL &&
2247		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2248			if (object->ref_count == 0) {
2249				db_printf("vmochk: internal obj has zero ref count, size %ld\n",
2250					(long)object->size);
2251			}
2252			if (!vm_object_in_map(object)) {
2253				db_printf(
2254			"vmochk: internal obj is not in a map: "
2255			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
2256				    object->ref_count, (u_long)object->size,
2257				    (u_long)object->size,
2258				    (void *)object->backing_object);
2259			}
2260		}
2261	}
2262}
2263
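/*
 * Example ddb invocation (editorial addition; output elided):
 *
 *	db> show vmochk
 *
 * walks vm_object_list and reports internal (anonymous) objects that
 * have a zero reference count or are not reachable from any map.
 */
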
2264/*
2265 *	vm_object_print:	[ debug ]
2266 */
2267DB_SHOW_COMMAND(object, vm_object_print_static)
2268{
2269	/* XXX convert args. */
2270	vm_object_t object = (vm_object_t)addr;
2271	boolean_t full = have_addr;
2272
2273	vm_page_t p;
2274
2275	/* XXX count is an (unused) arg.  Avoid shadowing it. */
2276#define	count	was_count
2277
2278	int count;
2279
2280	if (object == NULL)
2281		return;
2282
2283	db_iprintf(
2284	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x, uip=%d, charge=0x%jx\n",
2285	    object, (int)object->type, (uintmax_t)object->size,
2286	    object->resident_page_count, object->ref_count, object->flags,
2287	    object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
2288	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2289	    object->shadow_count,
2290	    object->backing_object ? object->backing_object->ref_count : 0,
2291	    object->backing_object, (uintmax_t)object->backing_object_offset);
2292
2293	if (!full)
2294		return;
2295
2296	db_indent += 2;
2297	count = 0;
2298	TAILQ_FOREACH(p, &object->memq, listq) {
2299		if (count == 0)
2300			db_iprintf("memory:=");
2301		else if (count == 6) {
2302			db_printf("\n");
2303			db_iprintf(" ...");
2304			count = 0;
2305		} else
2306			db_printf(",");
2307		count++;
2308
2309		db_printf("(off=0x%jx,page=0x%jx)",
2310		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2311	}
2312	if (count != 0)
2313		db_printf("\n");
2314	db_indent -= 2;
2315}
2316
2317/* XXX. */
2318#undef count
2319
2320/* XXX need this non-static entry for calling from vm_map_print. */
2321void
2322vm_object_print(
2323        /* db_expr_t */ long addr,
2324	boolean_t have_addr,
2325	/* db_expr_t */ long count,
2326	char *modif)
2327{
2328	vm_object_print_static(addr, have_addr, count, modif);
2329}
2330
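/*
 * Example ddb invocations (editorial addition; the address is
 * hypothetical):
 *
 *	db> show object 0xc40d3b00
 *	db> show vmopag
 *
 * "show object" prints the state of the object at the given address,
 * including its page list; "show vmopag" summarizes runs of physically
 * contiguous pages for the objects in the system.
 */
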
2331DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2332{
2333	vm_object_t object;
2334	vm_pindex_t fidx;
2335	vm_paddr_t pa;
2336	vm_page_t m, prev_m;
2337	int rcount, nl, c;
2338
2339	nl = 0;
2340	TAILQ_FOREACH(object, &vm_object_list, object_list) {
2341		db_printf("new object: %p\n", (void *)object);
2342		if (nl > 18) {
2343			c = cngetc();
2344			if (c != ' ')
2345				return;
2346			nl = 0;
2347		}
2348		nl++;
2349		rcount = 0;
2350		fidx = 0;
2351		pa = -1;
2352		TAILQ_FOREACH(m, &object->memq, listq) {
2353			if (m->pindex > 128)
2354				break;
2355			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
2356			    prev_m->pindex + 1 != m->pindex) {
2357				if (rcount) {
2358					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2359						(long)fidx, rcount, (long)pa);
2360					if (nl > 18) {
2361						c = cngetc();
2362						if (c != ' ')
2363							return;
2364						nl = 0;
2365					}
2366					nl++;
2367					rcount = 0;
2368				}
2369			}
2370			if (rcount &&
2371				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2372				++rcount;
2373				continue;
2374			}
2375			if (rcount) {
2376				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2377					(long)fidx, rcount, (long)pa);
2378				if (nl > 18) {
2379					c = cngetc();
2380					if (c != ' ')
2381						return;
2382					nl = 0;
2383				}
2384				nl++;
2385			}
2386			fidx = m->pindex;
2387			pa = VM_PAGE_TO_PHYS(m);
2388			rcount = 1;
2389		}
2390		if (rcount) {
2391			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2392				(long)fidx, rcount, (long)pa);
2393			if (nl > 18) {
2394				c = cngetc();
2395				if (c != ' ')
2396					return;
2397				nl = 0;
2398			}
2399			nl++;
2400		}
2401	}
2402}
2403#endif /* DDB */
2404