/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_object.c 122349 2003-11-09 05:25:35Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

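/*
 * Divisor applied to an object's resident page count by
 * vm_object_page_clean() when sizing its sequential ("easy") scan window.
 */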
#define EASY_SCAN_FACTOR       8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations.  MSYNC_FLUSH_HARDSEQ enables
 * the sequential pre-scan in vm_object_page_clean(); MSYNC_FLUSH_SOFTSEQ
 * enables that function's page lookahead during its full rescan.
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
	CTLFLAG_RW, &msync_flush_flags, 0, "");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

static long object_collapses;
static long object_bypasses;
static int next_index;
static uma_zone_t obj_zone;
#define VM_OBJECTS_INIT 256

static void vm_object_zinit(void *mem, int size);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static void
vm_object_zinit(void *mem, int size)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object);

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
}

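/*
 *	_vm_object_allocate:
 *
 *	Initialize the given, preallocated object and add it to the global
 *	object list.  Most callers should use vm_object_allocate(); this
 *	form also serves statically allocated objects such as
 *	kernel_object_store and kmem_object_store.
 */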
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	int incr;

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	do
		object->pg_color = next_index;
	while (!atomic_cmpset_int(&next_index, object->pg_color,
				  (object->pg_color + incr) & PQ_L2_MASK));
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store);
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);

	/*
	 * The kmem object's mutex is given a unique name, instead of
	 * "vm object", to avoid false reports of lock-order reversal
	 * with a system map mutex.
	 */
	mtx_init(VM_OBJECT_MTX(kmem_object), "kmem object", NULL, MTX_DEF);
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kmem_object);

	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
	uma_prealloc(obj_zone, VM_OBJECTS_INIT);
}

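/*
 *	vm_object_clear_flag:
 *
 *	Clear the given flag bits on the object.  The object lock
 *	serializes all flag updates.
 */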
void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

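/*
 * The paging_in_progress ("pip") counter maintained by the functions
 * below tracks pager operations in flight against an object; routines
 * such as vm_object_terminate() wait for it to drain.  A hedged sketch
 * of the usual pattern (illustrative, not from a real caller):
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_object_pip_add(object, 1);
 *	... issue and complete pager I/O ...
 *	vm_object_pip_wakeup(object);
 *	VM_OBJECT_UNLOCK(object);
 */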
void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

321 *	vm_object_allocate_wait
322 *
323 *	Return a new object with the given size, and give the user the
324 *	option of waiting for it to complete or failing if the needed
325 *	memory isn't available.
326 */
327vm_object_t
328vm_object_allocate_wait(objtype_t type, vm_pindex_t size, int flags)
329{
330	vm_object_t result;
331
332	result = (vm_object_t) uma_zalloc(obj_zone, flags);
333
334	if (result != NULL)
335		_vm_object_allocate(type, size, result);
336
337	return (result);
338}
339
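/*
 * A hedged sketch of nonblocking allocation (callers must tolerate NULL,
 * since uma_zalloc() may fail when given M_NOWAIT):
 *
 *	object = vm_object_allocate_wait(OBJT_DEFAULT, size, M_NOWAIT);
 *	if (object == NULL)
 *		... back out and return an error ...
 */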
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	return (vm_object_allocate_wait(type, size, M_WAITOK));
}

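/*
 * A hedged sketch of reference pairing as a hypothetical caller might
 * write it (illustrative only, not taken from a real caller):
 *
 *	vm_object_reference(obj);	object cannot be reclaimed
 *	... use obj ...
 *	vm_object_deallocate(obj);	drop the reference again
 */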
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		VI_LOCK(vp);
		VM_OBJECT_UNLOCK(object);
		for (flags = LK_INTERLOCK; vget(vp, flags, curthread);
		     flags = 0)
			printf("vm_object_reference: delay in vget\n");
	} else
		VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference_locked: dead object referenced"));
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele() may need a vop lock.
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	if (object != kmem_object)
		mtx_lock(&Giant);
	while (object != NULL) {
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			goto done;
		}

		KASSERT(object->ref_count != 0,
			("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			goto done;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						VM_OBJECT_LOCK(object);
						goto retry;
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						VM_OBJECT_LOCK(object);
						goto retry;
					}
					VM_OBJECT_UNLOCK(object);
					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			goto done;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
done:
	if (object != kmem_object)
		mtx_unlock(&Giant);
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
		("vm_object_terminate: object with references, ref_count=%d",
		object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
			("vm_object_terminate: freeing busy page %p "
			"p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();
	splx(s);

	/*
	 * Let the pager know the object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves each
 *	page on whatever queue it is currently on.  If NOSYNC is set then do
 *	not write out pages with PG_NOSYNC set (originally comes from
 *	MAP_NOSYNC), leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if end == 0, we clean from start to the end of
 *	the object.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range it knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0 ||
			    (p->queue - p->pc) == PQ_CACHE) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p,
			    curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			pmap_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
			(pi < tstart) || (pi >= tend) ||
			(p->valid == 0) ||
			((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
			curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0,
	    curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}
853
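/*
 *	vm_object_page_collect_flush:
 *
 *	Starting at page p, collect the longest run of adjacent dirty
 *	pages (look-ahead in maf[], look-behind in mab[]) and hand the
 *	run to vm_pageout_flush().  Returns the number of pages advanced
 *	past p, or 0 if sleeping on a busy page allowed the object's
 *	generation to change, in which case the caller must rescan.
 */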
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
{
	int runlen;
	int s;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			splx(s);
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				 (tp->flags & PG_CLEANCHK) == 0) ||
				(tp->busy != 0))
				break;
			if ((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
					 (tp->flags & PG_CLEANCHK) == 0) ||
					(tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	splx(s);
	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		offset += object->backing_object_offset;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		mtx_lock(&Giant);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0, curthread);
		mtx_unlock(&Giant);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    FALSE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
		VM_OBJECT_LOCK(tobject);
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			goto unlock_tobject;
		}
		if (vm_page_sleep_if_busy(m, TRUE, "madvpo")) {
			VM_OBJECT_UNLOCK(tobject);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		VM_OBJECT_UNLOCK(tobject);
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes its reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		if (length < source->size)
			length = source->size;
		if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
		    source->generation > 1)
			length = PQ_L2_SIZE / 3 + PQ_PRIME1;
		result->pg_color = (source->pg_color +
		    length * source->generation) & PQ_L2_MASK;
		VM_OBJECT_UNLOCK(source);
		next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
		    PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new object and offset.
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_offset_t s, e;
	vm_pindex_t offidxstart, offidxend;
	vm_size_t idx, size;
	vm_ooffset_t offset;

	GIANT_REQUIRED;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;

	offset = entry->offset;
	s = entry->start;
	e = entry->end;

	offidxstart = OFF_TO_IDX(offset);
	offidxend = offidxstart + OFF_TO_IDX(e - s);
	size = offidxend - offidxstart;

	new_object = vm_pager_allocate(orig_object->type,
		NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
	if (new_object == NULL)
		return;

	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head,
				  new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
			orig_object->backing_object_offset + offset;
		new_object->backing_object = source;
	}
	for (idx = 0; idx < size; idx++) {
	retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		vm_page_lock_queues();
		if ((m->flags & PG_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			msleep(m, &vm_page_queue_mtx, PDROP | PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			VM_OBJECT_LOCK(orig_object);
			goto retry;
		}
		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
		vm_page_unlock_queues();
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
	}
	VM_OBJECT_UNLOCK(orig_object);
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

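/*
 *	vm_object_backing_scan:
 *
 *	Walk the resident pages of object's backing object.  The op
 *	argument selects the behavior: OBSC_TEST_ALL_SHADOWED checks
 *	whether every backing page is shadowed by (or pager-backed in)
 *	object, while OBSC_COLLAPSE_WAIT and OBSC_COLLAPSE_NOWAIT migrate
 *	pages into object, waiting (or not) for busy pages.  Returns 0
 *	if the shadow test fails, nonzero otherwise.
 */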
static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			vm_page_lock_queues();
			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy) {
					vm_page_unlock_queues();
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->flags & PG_BUSY) || p->busy) {
					vm_page_flag_set(p,
					    PG_WANTED | PG_REFERENCED);
					VM_OBJECT_UNLOCK(backing_object);
					VM_OBJECT_UNLOCK(object);
					msleep(p, &vm_page_queue_mtx,
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);
			vm_page_unlock_queues();

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}


/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;
/* XXX */		VM_OBJECT_UNLOCK(object);

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
			    ("backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);

			mtx_lock(&vm_object_list_mtx);
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			mtx_unlock(&vm_object_list_mtx);

/* XXX */		VM_OBJECT_LOCK(object);
			uma_zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	Removes all physical pages in the given range from the
 *	object's list of pages.  If the range's end is zero, all
 *	physical pages from the range's start to the end of the object
 *	are deleted.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->resident_page_count == 0)
		return;

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS,
	    ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	vm_page_lock_queues();
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < start) {
			p = vm_page_splay(start, object->root);
			if ((object->root = p)->pindex < start)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (p->pindex < end || end == 0);
	     p = next) {
		next = TAILQ_NEXT(p, listq);

		if (p->wire_count != 0) {
			pmap_remove_all(p);
			if (!clean_only)
				p->valid = 0;
			continue;
		}
		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
			goto again;
		if (clean_only && p->valid) {
			vm_page_test_dirty(p);
			if (p->valid & p->dirty)
				continue;
		}
		vm_page_busy(p);
		pmap_remove_all(p);
		vm_page_free(p);
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	Returns TRUE if the objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_pindex	Page index within prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of the adjoining (second) region
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
	vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_LOCK(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_UNLOCK(prev_object);
	return (TRUE);
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_OBJDIRTY) == 0)
			vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

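/*
 * Return 1 if the given object is reachable from the map, or from the
 * single map entry when one is supplied; return 0 otherwise.
 */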
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
        /* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */