/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_object.c 69972 2000-12-13 10:01:00Z tanimura $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void	vm_object_qcollapse __P((vm_object_t object));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
#ifndef NULL_SIMPLELOCKS
static struct simplelock vm_object_list_lock;
#endif
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2()
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}

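/*
 * Illustrative sketch (not part of the original source): a caller that
 * needs an anonymous object covering "size" bytes would typically do
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(size)));
 *	...
 *	vm_object_deallocate(obj);
 *
 * The OFF_TO_IDX() conversion reflects that object sizes are expressed
 * in pages, not bytes.
 */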

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	vm_object_t object;
{
	if (object == NULL)
		return;

	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
			printf("vm_object_reference: delay in getting object\n");
		}
	}
}

void
vm_object_vndeallocate(object)
	vm_object_t object;
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		vm_object_clear_flag(object, OBJ_OPT);
	}
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
		} else if (object->ref_count > 2) {
			object->ref_count--;
			return;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
			object->ref_count--;
			return;
		} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
			object->ref_count--;
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
					 object->ref_count,
					 object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
						robject->paging_in_progress ||
						object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		} else {
			object->ref_count--;
			if (object->ref_count != 0)
				return;
		}

doterm:

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

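/*
 * Note on the special cases above (explanatory, not from the original
 * source): a ref_count of 2 with a single shadow typically arises from
 * copy-on-write, e.g. after a fork, where the only other reference to
 * the object is held through the shadow that was interposed for the
 * child.  Attempting a collapse at this point keeps shadow chains from
 * growing without bound as processes fork and exit.
 */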
/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(object)
	vm_object_t object;
{
	vm_page_t p;
	int s;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	if (object->ref_count != 0)
		panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			panic("vm_object_terminate: freeing busy page %p", p);
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	Odd semantics: if end == 0, we clean to the end of the object, so
 *	start == end == 0 cleans everything.
 *
 *	The object must be locked.
 */

void
vm_object_page_clean(object, start, end, flags)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	int flags;
{
	vm_page_t p, np, tp;
	vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int clearobjflags;
	int pagerflags;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	int curgeneration;

	if (object->type != OBJT_VNODE ||
		(object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */

	clearobjflags = 1;

	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			vm_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
			(pi < tstart) || (pi >= tend) ||
			(p->valid == 0) ||
			((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		s = splvm();
		while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
			if (object->generation != curgeneration) {
				splx(s);
				goto rescan;
			}
		}

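		/*
		 * Collect a run of contiguous dirty pages around p: scan
		 * forward into maf[] and backward into mab[], then merge
		 * the two into ma[] so that a single vm_pageout_flush()
		 * call can write the whole cluster.
		 */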
		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
					(tp->flags & PG_CLEANCHK) == 0 ||
					(tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
						(tp->flags & PG_CLEANCHK) == 0 ||
						(tp->busy != 0))
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		vm_page_flag_clear(p, PG_CLEANCHK);
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		runlen = maxb + maxf + 1;

		splx(s);
		vm_pageout_flush(ma, runlen, pagerflags);
		for (i = 0; i < runlen; i++) {
			if (ma[i]->valid & ma[i]->dirty) {
				vm_page_protect(ma[i], VM_PROT_READ);
				vm_page_flag_set(ma[i], PG_CLEANCHK);
			}
		}
		if (object->generation != curgeneration)
			goto rescan;
	}

#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

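/*
 * Example (illustrative): vm_object_terminate() above uses the "clean
 * everything, synchronously" form of this interface:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *
 * An msync()-style caller restricting the range would instead pass
 * explicit start/end page indices, and possibly OBJPC_NOSYNC to honor
 * MAP_NOSYNC mappings.
 */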
#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	vm_object_t object;
{
	vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */

void
vm_object_pmap_copy_1(object, start, end)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
{
	vm_page_t p;

	if (object == NULL)
		return;
	for (p = TAILQ_FIRST(&object->memq);
		p != NULL;
		p = TAILQ_NEXT(p, listq)) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		vm_object_clear_flag(object, OBJ_WRITEABLE);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */

	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object.  The current object's offset into its
			 * backing object must be added before we move down
			 * the chain.
			 */
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			if (tobject == NULL)
				continue;
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo"))
			goto relookup;

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
}

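/*
 * Note (explanatory, not from the original source): this routine is
 * reached from the madvise(2) system call via the vm_map layer, which
 * translates the user's address range into an object/pindex range.
 * The shadow-chain walk above is what lets advice apply to pages that
 * are resident only in a backing object.
 */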
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */

	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP))
		return;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */

	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}

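/*
 * Illustrative sketch (not from the original source): a fork or
 * copy-on-write path typically shadows a map entry's object like
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    atop(entry->end - entry->start));
 *
 * after which the entry points at the new (initially empty) object and
 * the old object continues to supply pages through the backing link.
 */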
#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

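/*
 *	vm_object_backing_scan:
 *
 *	Walk the resident pages of object's backing object and perform
 *	the requested operation:
 *
 *	OBSC_TEST_ALL_SHADOWED	check that every page in the scanned
 *				range is covered by the parent (either
 *				resident or in the parent's pager);
 *				returns 0 if the test fails.
 *	OBSC_COLLAPSE_NOWAIT	opportunistically free or rename pages
 *				into the parent, skipping busy pages.
 *	OBSC_COLLAPSE_WAIT	as above, but mark the backing object
 *				dead and sleep on busy pages.
 */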
static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */

	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */

	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */

		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if (
				    (p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy
				) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);

			vm_page_rename(p, object, new_pindex);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	vm_object_t object;
{
	vm_object_t backing_object = object->backing_object;

	if (backing_object->ref_count != 1)
		return;

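	/*
	 * Temporarily take extra references, presumably to keep the
	 * backing object from being reclaimed (and to defeat any
	 * ref_count == 1 checks) while the scan manipulates its pages.
	 */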
	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
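/*
 * Example (explanatory, not from the original source): repeated
 * copy-on-write shadowing can build a chain such as
 *
 *	object -> backing_object -> ...
 *
 * When the backing object's only reference is the parent's, the
 * collapse case below merges its pages (and swap) into the parent;
 * otherwise, if the parent already shadows every page, the bypass
 * case simply relinks the parent past it.
 */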
void
vm_object_collapse(object)
	vm_object_t object;
{
	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */

		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */

			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */

			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */

				vm_object_pip_add(object, 1);
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */

			TAILQ_REMOVE(
			    &object->backing_object->shadow_head,
			    object,
			    shadow_list
			);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(
				    &backing_object->backing_object->shadow_head,
				    backing_object,
				    shadow_list
				);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */

			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */

			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			TAILQ_REMOVE(
			    &backing_object->shadow_head,
			    object,
			    shadow_list
			);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				TAILQ_INSERT_TAIL(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t clean_only;
{
	vm_page_t p, next;
	unsigned int size;
	int all;

	if (object == NULL ||
	    object->resident_page_count == 0)
		return;

	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	size = end - start;
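
	/*
	 * For a large range (or a full-object removal) it is cheaper to
	 * walk the object's resident page list once than to call
	 * vm_page_lookup() for every index; the resident_page_count / 4
	 * cutoff below is the heuristic that picks between the two
	 * strategies.
	 */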
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */

				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != 0) {

				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}

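/*
 * Illustrative note (not from the original source): the map layer uses
 * this when a new anonymous mapping lands immediately after an existing
 * one, letting prev_object simply grow to cover the adjoining range
 * instead of allocating a second object for it.
 */
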
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	ALLPROC_LOCK(AP_SHARED);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			ALLPROC_LOCK(AP_RELEASE);
			return 1;
		}
	}
	ALLPROC_LOCK(AP_RELEASE);
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	return 0;
}

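/*
 * Invoked from the kernel debugger as "show vmochk": sanity-check that
 * every internal (anonymous) object is reachable from some map and that
 * none have a zero reference count.
 */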
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	/* db_expr_t */ long addr;
	boolean_t have_addr;
	/* db_expr_t */ long count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */