vm_object.c revision 31252
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.100 1997/11/07 09:21:00 phk Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static void	vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void	vm_object_deactivate_pages __P((vm_object_t));
#endif
static void	vm_object_terminate __P((vm_object_t));
static void	vm_object_cache_trim __P((void));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
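
/*
 * Illustrative note (not part of the original source): a typical
 * copy-on-write shadow chain built by vm_object_shadow() looks like
 *
 *	map entry --> shadow object --> ... --> vnode/swap object
 *
 * Faults search from the front of the chain toward the back; writes are
 * satisfied by copying the page into the front object.
 * vm_object_collapse() below flattens such chains once the intermediate
 * objects become private.
 */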

int vm_object_cache_max;
struct object_q vm_object_cached_list;
static int vm_object_cached;		/* size of cached list */
struct object_q vm_object_list;
struct simplelock vm_object_list_lock;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}
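
/*
 * Illustrative note (not part of the original source): pg_color staggers
 * the starting page-queue color of successive objects so that their
 * pages spread evenly over the PQ_L2_SIZE coloring queues.  For
 * instance, with PQ_L2_SIZE == 256 and PQ_PRIME1 == 31, a large object
 * advances next_index by 256 / 3 + 31 == 116 while a smaller object
 * advances it by its own size, all masked by PQ_L2_MASK; the actual
 * constants depend on the kernel's PQ_* configuration options.
 */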

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2()
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}
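
/*
 * Illustrative sketch (not part of the original source): a caller
 * typically sizes the object in pages with OFF_TO_IDX() and pairs the
 * allocation with vm_object_deallocate():
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(len));
 *	...
 *	vm_object_deallocate(obj);
 */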

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	if (object->ref_count == 0) {
		if ((object->flags & OBJ_CANPERSIST) == 0)
			panic("vm_object_reference: non-persistent object with 0 ref_count");
		TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
		vm_object_cached--;
	}
	object->ref_count++;
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * Lose the reference
		 */
		object->ref_count--;
		if (object->ref_count != 0) {
			if ((object->ref_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				if ((robject != NULL) &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {
					int s;

					robject->ref_count += 2;
					object->ref_count += 2;
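					/*
					 * The temporary references taken
					 * above keep both objects from
					 * being reclaimed while we sleep
					 * below waiting for their paging
					 * activity to finish.
					 */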

					do {
						s = splvm();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while (object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if (robject->ref_count == 0) {
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_collapse(robject);
					return;
				}
			}
			/*
			 * If there are still references, then we are done.
			 */
			return;
		}

		if (object->type == OBJT_VNODE) {
			struct vnode *vp = object->handle;

			vp->v_flag &= ~VTEXT;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache.
		 */
		if (object->flags & OBJ_CANPERSIST) {
			if (object->resident_page_count != 0) {
#if 0
				vm_object_page_clean(object, 0, 0, TRUE, TRUE);
#endif
				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached++;

				vm_object_cache_trim();
				return;
			} else {
				object->flags &= ~OBJ_CANPERSIST;
			}
		}

		/*
		 * Make sure no one uses us.
		 */
		object->flags |= OBJ_DEAD;

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			--temp->shadow_count;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
static void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	if (object->flags & OBJ_VFS_REF)
		panic("vm_object_terminate: freeing VFS_REF'ed object");

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	s = splvm();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objtrm", 0);
	}
	splx(s);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = object->handle;
		struct proc *cp = curproc;	/* XXX */
		int waslocked;

		waslocked = VOP_ISLOCKED(vp);
		if (!waslocked)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, cp);
		vm_object_page_clean(object, 0, 0, TRUE, FALSE);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
		if (!waslocked)
			VOP_UNLOCK(vp, 0, cp);
	}

	/*
	 * Now free the pages. For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */

void
vm_object_page_clean(object, start, end, syncio, lockflag)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t syncio;
	boolean_t lockflag;
{
	register vm_page_t p, np, tp;
	register vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	struct proc *pproc = curproc;	/* XXX */

	if (object->type != OBJT_VNODE ||
		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	vp = object->handle;

	if (lockflag)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}
	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}
	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
		p->flags |= PG_CLEANCHK;

rescan:
	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
			(pi < tstart) || (pi >= tend) ||
			(p->valid == 0) ||
			((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		s = splvm();
		if ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED|PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			splx(s);
			goto rescan;
		}
		splx(s);

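		/*
		 * Build a cluster around p: scan forward, then backward,
		 * for adjacent dirty pages so that up to
		 * vm_pageout_page_count pages can be pushed to the vnode
		 * with a single vm_pageout_flush() call.
		 */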
		s = splvm();
		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
					(tp->flags & PG_CLEANCHK) == 0)
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
						(tp->flags & PG_CLEANCHK) == 0)
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;

			ma[index] = mab[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		vm_page_protect(p, VM_PROT_READ);
		p->flags |= PG_BUSY;
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;

			ma[index] = maf[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, 0);
		goto rescan;
	}

	VOP_FSYNC(vp, NULL, syncio, pproc);

	if (lockflag)
		VOP_UNLOCK(vp, 0, pproc);
	object->flags &= ~OBJ_CLEANING;
	return;
}

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 *	Trim the object cache to size.
 */
static void
vm_object_cache_trim()
{
	register vm_object_t object;

	while (vm_object_cached > vm_object_cache_max) {
		object = TAILQ_FIRST(&vm_object_cached_list);

		vm_object_reference(object);
		pager_cache(object, FALSE);
	}
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (p = TAILQ_FIRST(&object->memq);
		p != NULL;
		p = TAILQ_NEXT(p, listq)) {
		vm_page_protect(p, VM_PROT_READ);
	}

	object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;
	for (p = TAILQ_FIRST(&object->memq);
		p != NULL;
		p = TAILQ_NEXT(p, listq)) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	int s;
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;
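
	/*
	 * For each index in the range, look the page up in the object,
	 * stepping down the backing-object chain when an anonymous
	 * (OBJT_DEFAULT) object does not have the page resident.  The
	 * chain is only followed through exclusively-referenced backing
	 * objects, since shared ones may back other mappings as well.
	 */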
	for (; pindex < end; pindex += 1) {

relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			if (tobject->type != OBJT_DEFAULT) {
				continue;
			}

			/*
			 * Translate the index into the backing object
			 * before stepping down to it.
			 */
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			if ((tobject == NULL) || (tobject->ref_count != 1)) {
				continue;
			}
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  Things can break if we mess with pages
		 * in any of the below states.
		 */
		if (m->hold_count || m->wire_count ||
			m->valid != VM_PAGE_BITS_ALL) {
			continue;
		}

		if (m->busy || (m->flags & PG_BUSY)) {
			s = splvm();
			if (m->busy || (m->flags & PG_BUSY)) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "madvpw", 0);
			}
			splx(s);
			goto relookup;
		}

		if (advise == MADV_WILLNEED) {
			if (m->queue != PQ_ACTIVE)
				vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_deactivate(m);
		} else if (advise == MADV_FREE) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			/*
			 * Force a demand zero if attempt to read from swap.
			 * We currently don't handle vnode files correctly,
			 * and will reread stale contents unnecessarily.
			 */
			if (tobject->type == OBJT_SWAP)
				swap_pager_dmzspace(tobject, m->pindex, 1);
		}
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		++source->shadow_count;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->backing_object_offset = *offset;

	/*
	 * Return the new object and offset.
	 */

	*offset = 0;
	*object = result;
}
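
/*
 * Illustrative sketch (not part of the original source): map code that
 * write-faults a copy-on-write entry uses vm_object_shadow() roughly
 * like this:
 *
 *	vm_object_t obj = entry->object.vm_object;
 *	vm_ooffset_t off = entry->offset;
 *
 *	vm_object_shadow(&obj, &off, entry->end - entry->start);
 *	entry->object.vm_object = obj;
 *	entry->offset = off;
 *
 * Afterward the entry maps the new front object, and unmodified pages
 * continue to come from the old object through the chain.
 */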

/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->backing_object;
	if (backing_object->ref_count != 1)
		return;

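	/*
	 * Take extra references so that nothing reached from the page
	 * loop below can see a last-reference condition and try to
	 * collapse or reclaim the backing object out from under us.
	 */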
	backing_object->ref_count += 2;

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    ((p->queue - p->pc) == PQ_CACHE) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index + p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
				    paging_offset_index + new_pindex, NULL, NULL))) {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_rename(p, object, new_pindex);
				vm_page_protect(p, VM_PROT_NONE);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;
{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * We check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = TAILQ_FIRST(&backing_object->memq)) != NULL) {

				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
					} else {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =		/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
						OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			--object->backing_object->shadow_count;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				--backing_object->backing_object->shadow_count;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */

			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */

			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */

				if (p->pindex >= backing_offset_index &&
					new_pindex < size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL || pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed. Can't go any
						 * further.
						 */
						return;
					}
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head,
			    object, shadow_list);
			--object->backing_object->shadow_count;
			vm_object_reference(object->backing_object = backing_object->backing_object);
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}
			object->backing_object_offset += backing_object->backing_object_offset;

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	unsigned int size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
again:
	size = end - start;
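	/*
	 * Two strategies: for a large range (more than four pages, or at
	 * least a quarter of the object) walk the object's resident page
	 * list once; for a small range it is cheaper to look up each
	 * index individually with vm_page_lookup().
	 */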
	if (size > 4 || size >= object->size / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if ((start <= p->pindex) && (p->pindex < end)) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}

				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->wire_count != 0) {
					p->valid = 0;
					vm_page_protect(p, VM_PROT_NONE);
					start += 1;
					size -= 1;
					continue;
				}
				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_pindex	Page index into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of the adjoining region to be coalesced
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	register vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != prev_pindex + prev_size)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_pindex + prev_size,
	    prev_pindex + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_pindex + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	return (TRUE);
}
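
/*
 * Illustrative sketch (not part of the original source): vm_map_insert()
 * style code can use this routine to grow an anonymous mapping in place
 * instead of allocating a second object; reuse_prev_entry() below is a
 * hypothetical caller:
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *	    prev_size, next_size))
 *		reuse_prev_entry(prev_object);
 *
 * prev_size and next_size are byte counts; the routine shifts them down
 * to page counts internally.
 */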

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == NULL)
		return 0;

	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, NULL))
		return 1;
	if (_vm_object_in_map(kmem_map, object, NULL))
		return 1;
	if (_vm_object_in_map(pager_map, object, NULL))
		return 1;
	if (_vm_object_in_map(buffer_map, object, NULL))
		return 1;
	if (_vm_object_in_map(io_map, object, NULL))
		return 1;
	if (_vm_object_in_map(phys_map, object, NULL))
		return 1;
	if (_vm_object_in_map(mb_map, object, NULL))
		return 1;
	if (_vm_object_in_map(u_map, object, NULL))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: size %d\n",
					object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf("vmochk: internal obj is not in a map: "
		"ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
				    object->ref_count, object->size,
				    object->size, object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	register vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	register int count;

	if (object == NULL)
		return;

	db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
	    (int) object->paging_offset,
	    (int) object->backing_object, (int) object->backing_object_offset);
	db_printf("cache: next=%p, prev=%p\n",
	    TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	db_expr_t addr;
	boolean_t have_addr;
	db_expr_t count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: 0x%x\n", object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%d)run(%d)pa(0x%x)\n",
						fidx, rcount, pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
				db_printf("pd(%d)\n", padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */