vm_object.c revision 34611
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.118 1998/03/08 18:05:59 dyson Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static void	vm_object_qcollapse __P((vm_object_t object));
static void	vm_object_dispose __P((vm_object_t object));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
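
/*
 * Example object lifecycle (an illustrative sketch only, not compiled;
 * "example_object_lifecycle" is a hypothetical caller).  A pager or map
 * layer allocates an object, takes extra references while handing it
 * around, and drops them when done; the last vm_object_deallocate()
 * triggers vm_object_terminate().
 */
#if 0
static void
example_object_lifecycle(void)
{
	vm_object_t obj;

	obj = vm_object_allocate(OBJT_DEFAULT, 16);	/* 16 pages, ref_count == 1 */
	vm_object_reference(obj);			/* ref_count == 2 */
	vm_object_deallocate(obj);			/* ref_count == 1 */
	vm_object_deallocate(obj);			/* last ref: object is terminated */
}
#endif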

struct object_q vm_object_list;
static struct simplelock vm_object_list_lock;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->cache_count = 0;
	object->wire_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;
	object->generation++;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}
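
/*
 * Worked example of the page-color seeding above (illustrative only; the
 * numbers assume PQ_L2_SIZE == 256, PQ_PRIME1 == 31, PQ_L2_MASK == 255).
 * Successive objects get starting colors spread across the L2 cache:
 *
 *	size 4:   pg_color = 0,  next_index = (0 + 4) & 255 = 4
 *	size 4:   pg_color = 4,  next_index = (4 + 4) & 255 = 8
 *	size 500: pg_color = 8,  incr = 256/3 + 31 = 116,
 *	          next_index = (8 + 116) & 255 = 124
 */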

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2()
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

#if defined(DIAGNOSTIC)
	if (object->flags & OBJ_DEAD)
		panic("vm_object_reference: attempting to reference dead obj");
#endif

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
#if !defined(MAX_PERF)
			printf("vm_object_reference: delay in getting object\n");
#endif
		}
	}
}

void
vm_object_vndeallocate(object)
	vm_object_t object;
{
	struct vnode *vp = (struct vnode *) object->handle;

#if defined(DIAGNOSTIC)
	if (object->type != OBJT_VNODE)
		panic("vm_object_vndeallocate: not a vnode object");
	if (vp == NULL)
		panic("vm_object_vndeallocate: missing vp");
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		object->flags &= ~OBJ_OPT;
	}
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	int s;
	vm_object_t temp;

	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
		} else if (object->ref_count > 2) {
			object->ref_count--;
			return;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		if ((object->ref_count == 2) && (object->shadow_count == 1)) {

			object->ref_count--;
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
#if defined(DIAGNOSTIC)
				if (robject == NULL)
					panic("vm_object_deallocate: ref_count: %d,"
					    " shadow_count: %d",
					    object->ref_count, object->shadow_count);
#endif
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

			retry:
					if (robject->paging_in_progress ||
							object->paging_in_progress) {
						vm_object_pip_sleep(robject, "objde1");
						if (robject->paging_in_progress &&
							robject->type == OBJT_SWAP) {
							swap_pager_sync();
							goto retry;
						}

						vm_object_pip_sleep(object, "objde2");
						if (object->paging_in_progress &&
							object->type == OBJT_SWAP) {
							swap_pager_sync();
						}
						goto retry;
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		} else {
			object->ref_count--;
			if (object->ref_count != 0)
				return;
		}

doterm:

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				temp->flags &= ~OBJ_OPT;
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}
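
/*
 * Worked example of the ref_count == 2 special case above (illustrative
 * only).  A copy-on-write setup can leave this configuration:
 *
 *	some map entry -----> object  (ref_count 2, shadow_count 1)
 *	other map entry ----> robject (backing_object == object)
 *
 * The two references to object are the map entry's and robject's backing
 * reference.  When the map entry's reference is dropped, robject's backing
 * reference becomes the only one, so the code above collapses robject,
 * absorbing object into it rather than leaving a one-member shadow chain.
 */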

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	/*
	 * Make sure no one uses us.
	 */
	object->flags |= OBJ_DEAD;

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

#if defined(DIAGNOSTIC)
	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");
#endif

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

		/*
		 * Let the pager know object is dead.
		 */
		vm_pager_deallocate(object);

	}

	if ((object->type != OBJT_VNODE) && (object->ref_count == 0)) {

		/*
		 * Now free the pages. For internal objects, this also removes them
		 * from paging queues.
		 */
		while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
#if !defined(MAX_PERF)
			if (p->busy || (p->flags & PG_BUSY))
				printf("vm_object_terminate: freeing busy page\n");
#endif
			p->flags |= PG_BUSY;
			vm_page_free(p);
			cnt.v_pfree++;
		}
		/*
		 * Let the pager know object is dead.
		 */
		vm_pager_deallocate(object);

	}

	if ((object->ref_count == 0) && (object->resident_page_count == 0))
		vm_object_dispose(object);
}

/*
 * vm_object_dispose
 *
 * Dispose of the object.
 */
static void
vm_object_dispose(object)
	vm_object_t object;
{
	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);
	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
	wakeup(object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if end == 0, we clean everything.
 *
 *	The object must be locked.
 */

void
vm_object_page_clean(object, start, end, flags)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	int flags;
{
	register vm_page_t p, np, tp;
	register vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int pagerflags;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	int curgeneration;
	struct proc *pproc = curproc;	/* XXX */

	if (object->type != OBJT_VNODE ||
		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
		p->flags |= PG_CLEANCHK;
		vm_page_protect(p, VM_PROT_READ);
	}

	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
			(pi < tstart) || (pi >= tend) ||
			(p->valid == 0) ||
			((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		s = splvm();
		while ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED | PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			if (object->generation != curgeneration) {
				splx(s);
				goto rescan;
			}
		}

		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
					(tp->flags & PG_CLEANCHK) == 0 ||
					(tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
						(tp->flags & PG_CLEANCHK) == 0 ||
						(tp->busy != 0))
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			ma[index]->flags &= ~PG_CLEANCHK;
		}
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			ma[index]->flags &= ~PG_CLEANCHK;
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, pagerflags);
		if (object->generation != curgeneration)
			goto rescan;
	}

	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC) ? 1 : 0, curproc);

	object->flags &= ~OBJ_CLEANING;
	return;
}
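
/*
 * Worked example of the run assembly above (illustrative only; the value
 * of vm_pageout_page_count is assumed to be 8 here).  Given a dirty page
 * at pindex 10, two dirty neighbors behind it (9, 8) and three ahead
 * (11, 12, 13):
 *
 *	maf[] = { 11, 12, 13 }			maxf = 3
 *	mab[] = { 9, 8 }			maxb = 2
 *	ma[]  = { 8, 9, 10, 11, 12, 13 }	runlen = maxb + maxf + 1 = 6
 *
 * so vm_pageout_flush() sees a single ascending run of six pages.
 */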

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (p = TAILQ_FIRST(&object->memq);
		p != NULL;
		p = TAILQ_NEXT(p, listq)) {
		vm_page_protect(p, VM_PROT_READ);
	}

	object->flags &= ~OBJ_WRITEABLE;
}

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 */
void
vm_object_pmap_copy_1(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	vm_pindex_t idx;
	register vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;
	for (p = TAILQ_FIRST(&object->memq);
		p != NULL;
		p = TAILQ_NEXT(p, listq)) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	int s;
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	for (; pindex < end; pindex += 1) {

relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			if (tobject->type != OBJT_DEFAULT) {
				continue;
			}

			tobject = tobject->backing_object;
			if ((tobject == NULL) || (tobject->ref_count != 1)) {
				continue;
			}
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  Things can break if we mess with pages
		 * in any of the below states.
		 */
		if (m->hold_count || m->wire_count ||
			m->valid != VM_PAGE_BITS_ALL) {
			continue;
		}

		if (vm_page_sleep(m, "madvpo", &m->busy))
			goto relookup;

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_deactivate(m);
		} else if (advise == MADV_FREE) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			/*
			 * Force a demand zero if attempt to read from swap.
			 * We currently don't handle vnode files correctly,
			 * and will reread stale contents unnecessarily.
			 */
			if (object->type == OBJT_SWAP)
				swap_pager_dmzspace(tobject, m->pindex, 1);
		}
	}
}
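
/*
 * Example (illustrative sketch only; "obj" and the range are hypothetical).
 * Hinting that the first 32 pages of an object will be needed soon moves
 * any resident, unbusied pages onto the active queue; MADV_FREE clears
 * their dirty bits, and for a swap object the swap space may then be
 * demand-zeroed instead of read back.
 */
#if 0
	vm_object_madvise(obj, 0, 32, MADV_WILLNEED);	/* activate pages 0..31 */
	vm_object_madvise(obj, 0, 32, MADV_FREE);	/* discard their contents */
#endif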

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->backing_object_offset = *offset;

	/*
	 * Return the new object and offset.
	 */

	*offset = 0;
	*object = result;
}
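
/*
 * Example (illustrative sketch only; the entry_* names are hypothetical).
 * This is the copy-on-write pattern a map layer would use: the caller's
 * object pointer and offset are replaced by a fresh shadow object sitting
 * in front of the original, which keeps the original pages as the backing
 * copy.
 */
#if 0
	vm_object_t obj = entry_object;		/* e.g. from a map entry */
	vm_ooffset_t off = entry_offset;

	vm_object_shadow(&obj, &off, entry_size);
	/*
	 * obj is now a new OBJT_DEFAULT object, obj->backing_object is the
	 * original object, and off has been reset to 0.
	 */
#endif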


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->backing_object;
	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		p->flags |= PG_BUSY;

		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index+p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL ||
				(object->type == OBJT_SWAP && vm_pager_has_page(object,
				    paging_offset_index + new_pindex, NULL, NULL))) {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);

				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				else
					vm_page_protect(p, VM_PROT_NONE);

				vm_page_rename(p, object, new_pindex);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;
{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * We check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */

		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */

			while ((p = TAILQ_FIRST(&backing_object->memq)) != NULL) {

				new_pindex = p->pindex - backing_offset_index;
				p->flags |= PG_BUSY;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_free(p);
					} else {
						if ((p->queue - p->pc) == PQ_CACHE)
							vm_page_deactivate(p);
						else
							vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =		/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
						OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */

			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */

			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */

			for (p = TAILQ_FIRST(&backing_object->memq); p;
					p = TAILQ_NEXT(p, listq)) {

				new_pindex = p->pindex - backing_offset_index;
				p->flags |= PG_BUSY;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */

				if (p->pindex >= backing_offset_index &&
					new_pindex < size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL) || (pp->flags & PG_BUSY) || pp->busy) {
						PAGE_WAKEUP(p);
						return;
					}

					pp->flags |= PG_BUSY;
					if ((pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed. Can't go any
						 * further.
						 */
						PAGE_WAKEUP(pp);
						PAGE_WAKEUP(p);
						return;
					}
					PAGE_WAKEUP(pp);
				}
				PAGE_WAKEUP(p);
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(&backing_object->shadow_head,
			    object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				TAILQ_INSERT_TAIL(&new_backing_object->shadow_head,
				    object, shadow_list);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
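
/*
 * Worked example (illustrative only).  Suppose a fork left this chain,
 * with the child's map entry pointing at "object":
 *
 *	object (ref 1) --> backing_object (ref 1) --> NULL
 *
 * Since backing_object->ref_count == 1, vm_object_collapse() moves or
 * frees every resident page of backing_object (and migrates its swap
 * blocks via swap_pager_copy()), splices object onto backing_object's
 * own backing object, and zfree()s backing_object.  Had backing_object's
 * ref_count been 2, only the bypass case could apply, and only if every
 * page in the parent's window were already shadowed by the parent.
 */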

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	unsigned int size;
	int s, all;

	if (object == NULL)
		return;

	all = ((end == 0) && (start == 0));

	object->paging_in_progress++;
again:
	size = end - start;
	if (all || size > 4 || size >= object->size / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */

				if (vm_page_sleep(p, "vmopar", &p->busy))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				p->flags |= PG_BUSY;
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != NULL) {

				if (p->wire_count != 0) {
					p->valid = 0;
					vm_page_protect(p, VM_PROT_NONE);
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep(p, "vmopar", &p->busy))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				p->flags |= PG_BUSY;
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_pindex	Page index into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	register vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if the object:
	 * 1. has more than one reference
	 * 2. is paged out
	 * 3. shadows another object
	 * 4. has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != prev_pindex + prev_size)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_pindex + prev_size,
	    prev_pindex + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_pindex + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	return (TRUE);
}
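
/*
 * Example (illustrative sketch only; the values are hypothetical).  The
 * map layer uses this when extending an anonymous mapping: if a new
 * region begins exactly where prev_object's mapped range ends, the
 * object can simply grow instead of allocating a second object.
 */
#if 0
	/* prev_object covers pages [0, 16); the new region adds 4 pages. */
	if (vm_object_coalesce(prev_object, 0,
	    (vm_size_t)16 << PAGE_SHIFT, (vm_size_t)4 << PAGE_SHIFT)) {
		/* prev_object->size is now at least 20 pages */
	}
#endif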

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(io_map, object, 0))
		return 1;
	if (_vm_object_in_map(phys_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	if (_vm_object_in_map(u_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %d\n",
					object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf("vmochk: internal obj is not in a map: "
				    "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
				    object->ref_count, object->size,
				    object->size, object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	register vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	register int count;

	if (object == NULL)
		return;

	db_iprintf("Object 0x%x: type=%d, size=0x%x, res=%d, ref=%d, flags=0x%x\n",
	    (int) object, (int) object->type, (int) object->size,
	    object->resident_page_count,
	    object->ref_count,
	    object->flags);
	db_iprintf(" sref=%d, offset=0x%x, backing_object(%d)=(0x%x)+0x%x\n",
	    object->shadow_count,
	    (int) object->paging_offset,
	    (((int)object->backing_object) ? object->backing_object->ref_count : 0),
	    (int) object->backing_object,
	    (int) object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	db_expr_t addr;
	boolean_t have_addr;
	db_expr_t count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: 0x%x\n", object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%d)run(%d)pa(0x%x)\n",
						fidx, rcount, pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
				db_printf("pd(%d)\n", padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */