1/*
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57 *  School of Computer Science
58 *  Carnegie Mellon University
59 *  Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_object.c,v 1.3 1994/08/02 07:55:29 davidg Exp $
65 */
66
67/*
68 *	Virtual memory object module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74
75#include <vm/vm.h>
76#include <vm/vm_page.h>
77#include <vm/vm_pageout.h>
78
79static void _vm_object_allocate(vm_size_t, vm_object_t);
80void vm_object_deactivate_pages(vm_object_t);
81void vm_object_cache_trim(void);
82void vm_object_remove(vm_pager_t);
83
84/*
85 *	Virtual memory objects maintain the actual data
86 *	associated with allocated virtual memory.  A given
87 *	page of memory exists within exactly one object.
88 *
89 *	An object is only deallocated when all "references"
90 *	are given up.  Only one "reference" to a given
91 *	region of an object should be writeable.
92 *
93 *	Associated with each object is a list of all resident
94 *	memory pages belonging to that object; this list is
95 *	maintained by the "vm_page" module, and locked by the object's
96 *	lock.
97 *
98 *	Each object also records a "pager" routine which is
99 *	used to retrieve (and store) pages to the proper backing
100 *	storage.  In addition, objects may be backed by other
101 *	objects from which they were virtual-copied.
102 *
103 *	The only items within the object structure which are
104 *	modified after time of creation are:
105 *		reference count		locked by object's lock
106 *		pager routine		locked by object's lock
107 *
108 */
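/*
 * Illustrative sketch, not part of the original source: the object life
 * cycle described above as a caller sees it.  An object is created with
 * vm_object_allocate(), each additional user takes a reference with
 * vm_object_reference(), and every reference is eventually dropped with
 * vm_object_deallocate().  The function name and the 64KB size below are
 * hypothetical.
 */
#if 0
static void
vm_object_lifecycle_sketch(void)
{
	vm_object_t object;

	object = vm_object_allocate(round_page(64 * 1024));	/* ref_count == 1 */
	vm_object_reference(object);	/* e.g. a second map entry */

	/* ... the object's pages are faulted in and used here ... */

	vm_object_deallocate(object);	/* drop the extra reference */
	vm_object_deallocate(object);	/* last reference: cached or terminated */
}
#endif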
109
110
111struct vm_object	kernel_object_store;
112struct vm_object	kmem_object_store;
113
114extern int vm_cache_max;
115#define	VM_OBJECT_HASH_COUNT	157
116
117struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
118
119long	object_collapses = 0;
120long	object_bypasses  = 0;
121
122static void
123_vm_object_allocate(size, object)
124	vm_size_t		size;
125	register vm_object_t	object;
126{
127	bzero(object, sizeof *object);
128	TAILQ_INIT(&object->memq);
129	vm_object_lock_init(object);
130	object->ref_count = 1;
131	object->resident_page_count = 0;
132	object->size = size;
133	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
134	object->paging_in_progress = 0;
135	object->copy = NULL;
136
137	/*
138	 *	Object starts out read-write, with no pager.
139	 */
140
141	object->pager = NULL;
142	object->paging_offset = 0;
143	object->shadow = NULL;
144	object->shadow_offset = (vm_offset_t) 0;
145
146	simple_lock(&vm_object_list_lock);
147	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
148	vm_object_count++;
149	cnt.v_nzfod += atop(size);
150	simple_unlock(&vm_object_list_lock);
151}
152
153/*
154 *	vm_object_init:
155 *
156 *	Initialize the VM objects module.
157 */
158void
159vm_object_init(vm_offset_t nothing)
160{
161	register int	i;
162
163	TAILQ_INIT(&vm_object_cached_list);
164	TAILQ_INIT(&vm_object_list);
165	vm_object_count = 0;
166	simple_lock_init(&vm_cache_lock);
167	simple_lock_init(&vm_object_list_lock);
168
169	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
170		TAILQ_INIT(&vm_object_hashtable[i]);
171
172	kernel_object = &kernel_object_store;
173	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
174			kernel_object);
175
176	kmem_object = &kmem_object_store;
177	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
178			kmem_object);
179}
180
181/*
182 *	vm_object_allocate:
183 *
184 *	Returns a new object with the given size.
185 */
186
187vm_object_t
188vm_object_allocate(size)
189	vm_size_t	size;
190{
191	register vm_object_t	result;
192	int s;
193
194	result = (vm_object_t)
195		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);
196
197
198	_vm_object_allocate(size, result);
199
200	return(result);
201}
202
203
204/*
205 *	vm_object_reference:
206 *
207 *	Gets another reference to the given object.
208 */
209inline void
210vm_object_reference(object)
211	register vm_object_t	object;
212{
213	if (object == NULL)
214		return;
215
216	vm_object_lock(object);
217	object->ref_count++;
218	vm_object_unlock(object);
219}
220
221/*
222 *	vm_object_deallocate:
223 *
224 *	Release a reference to the specified object,
225 *	gained either through a vm_object_allocate
226 *	or a vm_object_reference call.  When all references
227 *	are gone, storage associated with this object
228 *	may be relinquished.
229 *
230 *	No object may be locked.
231 */
232void
233vm_object_deallocate(object)
234	vm_object_t	object;
235{
236	vm_object_t	temp;
237
238	while (object != NULL) {
239
240		/*
241		 *	The cache holds a reference (uncounted) to
242		 *	the object; we must lock it before removing
243		 *	the object.
244		 */
245
246		vm_object_cache_lock();
247
248		/*
249		 *	Lose the reference
250		 */
251		vm_object_lock(object);
252		if (--(object->ref_count) != 0) {
253
254			vm_object_unlock(object);
255			/*
256			 *	If there are still references, then
257			 *	we are done.
258			 */
259			vm_object_cache_unlock();
260			return;
261		}
262
263		/*
264		 *	See if this object can persist.  If so, enter
265		 *	it in the cache, then deactivate all of its
266		 *	pages.
267		 */
268
269		if (object->flags & OBJ_CANPERSIST) {
270
271			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
272				cached_list);
273			vm_object_cached++;
274			vm_object_cache_unlock();
275
276/*
277 * this code segment was removed because it kills performance with
 278 * large -- repetitively used binaries.  The functionality now resides
279 * in the pageout daemon
280 *			vm_object_deactivate_pages(object);
281 */
282			vm_object_unlock(object);
283
284			vm_object_cache_trim();
285			return;
286		}
287
288		/*
289		 *	Make sure no one can look us up now.
290		 */
291		vm_object_remove(object->pager);
292		vm_object_cache_unlock();
293
294		temp = object->shadow;
295		vm_object_terminate(object);
296			/* unlocks and deallocates object */
297		object = temp;
298	}
299}
300
301/*
302 *	vm_object_terminate actually destroys the specified object, freeing
303 *	up all previously used resources.
304 *
305 *	The object must be locked.
306 */
307void
308vm_object_terminate(object)
309	register vm_object_t	object;
310{
311	register vm_page_t	p;
312	vm_object_t		shadow_object;
313	int s;
314
315	/*
316	 *	Detach the object from its shadow if we are the shadow's
317	 *	copy.
318	 */
319	if ((shadow_object = object->shadow) != NULL) {
320		vm_object_lock(shadow_object);
321		if (shadow_object->copy == object)
322			shadow_object->copy = NULL;
323/*
324		else if (shadow_object->copy != NULL)
325			panic("vm_object_terminate: copy/shadow inconsistency");
326*/
327		vm_object_unlock(shadow_object);
328	}
329
330	/*
331	 *	Wait until the pageout daemon is through
332	 *	with the object.
333	 */
334
335	while (object->paging_in_progress) {
336		vm_object_sleep((int)object, object, FALSE);
337		vm_object_lock(object);
338	}
339
340	/*
341	 *	While the paging system is locked,
342	 *	pull the object's pages off the active
343	 *	and inactive queues.  This keeps the
344	 *	pageout daemon from playing with them
345	 *	during vm_pager_deallocate.
346	 *
347	 *	We can't free the pages yet, because the
348	 *	object's pager may have to write them out
349	 *	before deallocating the paging space.
350	 */
351
 352	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
353		VM_PAGE_CHECK(p);
354
355		vm_page_lock_queues();
356		s = splhigh();
357		if (p->flags & PG_ACTIVE) {
358			TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
359			p->flags &= ~PG_ACTIVE;
360			cnt.v_active_count--;
361		}
362
363		if (p->flags & PG_INACTIVE) {
364			TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
365			p->flags &= ~PG_INACTIVE;
366			cnt.v_inactive_count--;
367		}
368		splx(s);
369		vm_page_unlock_queues();
370	}
371
372	vm_object_unlock(object);
373
374	if (object->paging_in_progress != 0)
 375		panic("vm_object_terminate: pageout in progress");
376
377	/*
378	 *	Clean and free the pages, as appropriate.
379	 *	All references to the object are gone,
380	 *	so we don't need to lock it.
381	 */
382
383	if ((object->flags & OBJ_INTERNAL) == 0) {
384		vm_object_lock(object);
385		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
386		vm_object_unlock(object);
387	}
388
389	/*
390	 * Now free the pages.
391	 * For internal objects, this also removes them from paging queues.
392	 */
393	while ((p = object->memq.tqh_first) != NULL) {
394		VM_PAGE_CHECK(p);
395		vm_page_lock_queues();
396		vm_page_free(p);
397		cnt.v_pfree++;
398		vm_page_unlock_queues();
399	}
400
401	/*
402	 *	Let the pager know object is dead.
403	 */
404
405	if (object->pager != NULL)
406		vm_pager_deallocate(object->pager);
407
408
409	simple_lock(&vm_object_list_lock);
410	TAILQ_REMOVE(&vm_object_list, object, object_list);
411	vm_object_count--;
412	simple_unlock(&vm_object_list_lock);
413
414	/*
415	 *	Free the space for the object.
416	 */
417
418	free((caddr_t)object, M_VMOBJ);
419}
420
421/*
422 *	vm_object_page_clean
423 *
424 *	Clean all dirty pages in the specified range of object.
425 *	Leaves page on whatever queue it is currently on.
426 *
427 *	Odd semantics: if start == end, we clean everything.
428 *
429 *	The object must be locked.
430 */
431#if 1
432boolean_t
433vm_object_page_clean(object, start, end, syncio, de_queue)
434	register vm_object_t	object;
435	register vm_offset_t	start;
436	register vm_offset_t	end;
437	boolean_t		syncio;
438	boolean_t		de_queue;
439{
440	register vm_page_t	p, nextp;
441	int s;
442	int size;
443
444	if (object->pager == NULL)
445		return 1;
446
447	if (start != end) {
448		start = trunc_page(start);
449		end = round_page(end);
450	}
451	size = end - start;
452
453again:
454	/*
455	 * Wait until the pageout daemon is through with the object.
456	 */
457	while (object->paging_in_progress) {
458		vm_object_sleep((int)object, object, FALSE);
459	}
460
461	nextp = object->memq.tqh_first;
462	while ( (p = nextp) && ((start == end) || (size != 0) ) ) {
463		nextp = p->listq.tqe_next;
464		if (start == end || (p->offset >= start && p->offset < end)) {
465			if (p->flags & PG_BUSY)
466				continue;
467
468			size -= PAGE_SIZE;
469
470			if ((p->flags & PG_CLEAN)
471				 && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
472				p->flags &= ~PG_CLEAN;
473
474			if ((p->flags & PG_CLEAN) == 0) {
475				vm_pageout_clean(p,VM_PAGEOUT_FORCE);
476				goto again;
477			}
478		}
479	}
480	wakeup((caddr_t)object);
481	return 1;
482}
483#endif
484/*
485 *	vm_object_page_clean
486 *
487 *	Clean all dirty pages in the specified range of object.
488 *	If syncio is TRUE, page cleaning is done synchronously.
489 *	If de_queue is TRUE, pages are removed from any paging queue
490 *	they were on, otherwise they are left on whatever queue they
491 *	were on before the cleaning operation began.
492 *
493 *	Odd semantics: if start == end, we clean everything.
494 *
495 *	The object must be locked.
496 *
497 *	Returns TRUE if all was well, FALSE if there was a pager error
498 *	somewhere.  We attempt to clean (and dequeue) all pages regardless
499 *	of where an error occurs.
500 */
501#if 0
502boolean_t
503vm_object_page_clean(object, start, end, syncio, de_queue)
504	register vm_object_t	object;
505	register vm_offset_t	start;
506	register vm_offset_t	end;
507	boolean_t		syncio;
508	boolean_t		de_queue;
509{
510	register vm_page_t	p;
511	int onqueue;
512	boolean_t noerror = TRUE;
513
514	if (object == NULL)
515		return (TRUE);
516
517	/*
518	 * If it is an internal object and there is no pager, attempt to
519	 * allocate one.  Note that vm_object_collapse may relocate one
520	 * from a collapsed object so we must recheck afterward.
521	 */
522	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
523		vm_object_collapse(object);
524		if (object->pager == NULL) {
525			vm_pager_t pager;
526
527			vm_object_unlock(object);
528			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
529						  object->size, VM_PROT_ALL,
530						  (vm_offset_t)0);
531			if (pager)
532				vm_object_setpager(object, pager, 0, FALSE);
533			vm_object_lock(object);
534		}
535	}
536	if (object->pager == NULL)
537		return (FALSE);
538
539again:
540	/*
541	 * Wait until the pageout daemon is through with the object.
542	 */
543	while (object->paging_in_progress) {
544		vm_object_sleep((int)object, object, FALSE);
545		vm_object_lock(object);
546	}
547	/*
548	 * Loop through the object page list cleaning as necessary.
549	 */
550	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
551		onqueue = 0;
 552		if ((start == end || (p->offset >= start && p->offset < end)) &&
553		    !(p->flags & PG_FICTITIOUS)) {
554			if ((p->flags & PG_CLEAN) &&
555			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
556				p->flags &= ~PG_CLEAN;
557			/*
558			 * Remove the page from any paging queue.
559			 * This needs to be done if either we have been
560			 * explicitly asked to do so or it is about to
561			 * be cleaned (see comment below).
562			 */
563			if (de_queue || !(p->flags & PG_CLEAN)) {
564				vm_page_lock_queues();
565				if (p->flags & PG_ACTIVE) {
566					TAILQ_REMOVE(&vm_page_queue_active,
567						     p, pageq);
568					p->flags &= ~PG_ACTIVE;
569					cnt.v_active_count--;
570					onqueue = 1;
571				} else if (p->flags & PG_INACTIVE) {
572					TAILQ_REMOVE(&vm_page_queue_inactive,
573						     p, pageq);
574					p->flags &= ~PG_INACTIVE;
575					cnt.v_inactive_count--;
576					onqueue = -1;
577				} else
578					onqueue = 0;
579				vm_page_unlock_queues();
580			}
581			/*
582			 * To ensure the state of the page doesn't change
583			 * during the clean operation we do two things.
584			 * First we set the busy bit and write-protect all
585			 * mappings to ensure that write accesses to the
586			 * page block (in vm_fault).  Second, we remove
587			 * the page from any paging queue to foil the
588			 * pageout daemon (vm_pageout_scan).
589			 */
590			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
591			if (!(p->flags & PG_CLEAN)) {
592				p->flags |= PG_BUSY;
593				object->paging_in_progress++;
594				vm_object_unlock(object);
595				/*
596				 * XXX if put fails we mark the page as
597				 * clean to avoid an infinite loop.
 598				 * Will lose changes to the page.
599				 */
600				if (vm_pager_put(object->pager, p, syncio)) {
601					printf("%s: pager_put error\n",
602					       "vm_object_page_clean");
603					p->flags |= PG_CLEAN;
604					noerror = FALSE;
605				}
606				vm_object_lock(object);
607				object->paging_in_progress--;
608				if (!de_queue && onqueue) {
609					vm_page_lock_queues();
610					if (onqueue > 0)
611						vm_page_activate(p);
612					else
613						vm_page_deactivate(p);
614					vm_page_unlock_queues();
615				}
616				PAGE_WAKEUP(p);
617				goto again;
618			}
619		}
620	}
621	return (noerror);
622}
623#endif
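/*
 * Illustrative sketch, not part of the original source: how a caller is
 * expected to invoke vm_object_page_clean().  Passing start == end asks
 * for the whole object to be cleaned, as vm_object_terminate() does
 * above; a non-empty range cleans only the pages inside it.  The "off"
 * and "len" variables are hypothetical.
 */
#if 0
	vm_object_lock(object);

	/* flush every dirty page in the object, synchronously */
	(void) vm_object_page_clean(object, 0, 0, TRUE, FALSE);

	/* or: flush just the range [off, off + len) */
	(void) vm_object_page_clean(object, off, off + len, TRUE, FALSE);

	vm_object_unlock(object);
#endif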
624
625/*
626 *	vm_object_deactivate_pages
627 *
628 *	Deactivate all pages in the specified object.  (Keep its pages
629 *	in memory even though it is no longer referenced.)
630 *
631 *	The object must be locked.
632 */
633void
634vm_object_deactivate_pages(object)
635	register vm_object_t	object;
636{
637	register vm_page_t	p, next;
638
639	for (p = object->memq.tqh_first; p != NULL; p = next) {
640		next = p->listq.tqe_next;
641		vm_page_lock_queues();
642		vm_page_deactivate(p);
643		vm_page_unlock_queues();
644	}
645}
646
647/*
648 *	Trim the object cache to size.
649 */
650void
651vm_object_cache_trim()
652{
653	register vm_object_t	object;
654
655	vm_object_cache_lock();
656	while (vm_object_cached > vm_cache_max) {
657		object = vm_object_cached_list.tqh_first;
658		vm_object_cache_unlock();
659
660		if (object != vm_object_lookup(object->pager))
 661			panic("vm_object_cache_trim: I'm sooo confused.");
662
663		pager_cache(object, FALSE);
664
665		vm_object_cache_lock();
666	}
667	vm_object_cache_unlock();
668}
669
670
671/*
672 *	vm_object_pmap_copy:
673 *
674 *	Makes all physical pages in the specified
675 *	object range copy-on-write.  No writeable
676 *	references to these pages should remain.
677 *
678 *	The object must *not* be locked.
679 */
680void vm_object_pmap_copy(object, start, end)
681	register vm_object_t	object;
682	register vm_offset_t	start;
683	register vm_offset_t	end;
684{
685	register vm_page_t	p;
686
687	if (object == NULL)
688		return;
689
690	vm_object_lock(object);
691	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
692		if ((start <= p->offset) && (p->offset < end)) {
693			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
694			p->flags |= PG_COPYONWRITE;
695		}
696	}
697	vm_object_unlock(object);
698}
699
700/*
701 *	vm_object_pmap_remove:
702 *
703 *	Removes all physical pages in the specified
704 *	object range from all physical maps.
705 *
706 *	The object must *not* be locked.
707 */
708void
709vm_object_pmap_remove(object, start, end)
710	register vm_object_t	object;
711	register vm_offset_t	start;
712	register vm_offset_t	end;
713{
714	register vm_page_t	p;
715	int s;
716
717	if (object == NULL)
718		return;
719
720	vm_object_lock(object);
721again:
722	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
723		if ((start <= p->offset) && (p->offset < end)) {
724			s = splhigh();
725			if (p->flags & PG_BUSY) {
726				p->flags |= PG_WANTED;
727				tsleep((caddr_t) p, PVM, "vmopmr", 0);
728				splx(s);
729				goto again;
730			}
731			splx(s);
732			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
733			if ((p->flags & PG_CLEAN) == 0)
734				p->flags |= PG_LAUNDRY;
735		}
736	}
737	vm_object_unlock(object);
738}
739
740/*
741 *	vm_object_copy:
742 *
743 *	Create a new object which is a copy of an existing
744 *	object, and mark all of the pages in the existing
745 *	object 'copy-on-write'.  The new object has one reference.
746 *	Returns the new object.
747 *
748 *	May defer the copy until later if the object is not backed
749 *	up by a non-default pager.
750 */
751void vm_object_copy(src_object, src_offset, size,
752		    dst_object, dst_offset, src_needs_copy)
753	register vm_object_t	src_object;
754	vm_offset_t		src_offset;
755	vm_size_t		size;
756	vm_object_t		*dst_object;	/* OUT */
757	vm_offset_t		*dst_offset;	/* OUT */
758	boolean_t		*src_needs_copy;	/* OUT */
759{
760	register vm_object_t	new_copy;
761	register vm_object_t	old_copy;
762	vm_offset_t		new_start, new_end;
763
764	register vm_page_t	p;
765
766	if (src_object == NULL) {
767		/*
768		 *	Nothing to copy
769		 */
770		*dst_object = NULL;
771		*dst_offset = 0;
772		*src_needs_copy = FALSE;
773		return;
774	}
775
776
777	/*
778	 *	If the object's pager is null_pager or the
779	 *	default pager, we don't have to make a copy
780	 *	of it.  Instead, we set the needs copy flag and
781	 *	make a shadow later.
782	 */
783
784	vm_object_lock(src_object);
785
786	/*
787	 *	Try to collapse the object before copying it.
788	 */
789
790	vm_object_collapse(src_object);
791
792	if (src_object->pager == NULL ||
793	    src_object->pager->pg_type == PG_SWAP ||
794	    (src_object->flags & OBJ_INTERNAL)) {
795
796		/*
797		 *	Make another reference to the object
798		 */
799		src_object->ref_count++;
800
801		/*
802		 *	Mark all of the pages copy-on-write.
803		 */
804		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
805			if (src_offset <= p->offset &&
806			    p->offset < src_offset + size)
807				p->flags |= PG_COPYONWRITE;
808		vm_object_unlock(src_object);
809
810		*dst_object = src_object;
811		*dst_offset = src_offset;
812
813		/*
814		 *	Must make a shadow when write is desired
815		 */
816		*src_needs_copy = TRUE;
817		return;
818	}
819
820
821	/*
822	 *	If the object has a pager, the pager wants to
823	 *	see all of the changes.  We need a copy-object
824	 *	for the changed pages.
825	 *
826	 *	If there is a copy-object, and it is empty,
827	 *	no changes have been made to the object since the
828	 *	copy-object was made.  We can use the same copy-
829	 *	object.
830	 */
831
832    Retry1:
833	old_copy = src_object->copy;
834	if (old_copy != NULL) {
835		/*
836		 *	Try to get the locks (out of order)
837		 */
838		if (!vm_object_lock_try(old_copy)) {
839			vm_object_unlock(src_object);
840
841			/* should spin a bit here... */
842			vm_object_lock(src_object);
843			goto Retry1;
844		}
845
846		if (old_copy->resident_page_count == 0 &&
847		    old_copy->pager == NULL) {
848			/*
849			 *	Return another reference to
850			 *	the existing copy-object.
851			 */
852			old_copy->ref_count++;
853			vm_object_unlock(old_copy);
854			vm_object_unlock(src_object);
855			*dst_object = old_copy;
856			*dst_offset = src_offset;
857			*src_needs_copy = FALSE;
858			return;
859		}
860		vm_object_unlock(old_copy);
861	}
862	vm_object_unlock(src_object);
863
864	/*
865	 *	If the object has a pager, the pager wants
866	 *	to see all of the changes.  We must make
867	 *	a copy-object and put the changed pages there.
868	 *
869	 *	The copy-object is always made large enough to
870	 *	completely shadow the original object, since
871	 *	it may have several users who want to shadow
872	 *	the original object at different points.
873	 */
874
875	new_copy = vm_object_allocate(src_object->size);
876
877    Retry2:
878	vm_object_lock(src_object);
879	/*
880	 *	Copy object may have changed while we were unlocked
881	 */
882	old_copy = src_object->copy;
883	if (old_copy != NULL) {
884		/*
885		 *	Try to get the locks (out of order)
886		 */
887		if (!vm_object_lock_try(old_copy)) {
888			vm_object_unlock(src_object);
889			goto Retry2;
890		}
891
892		/*
893		 *	Consistency check
894		 */
895		if (old_copy->shadow != src_object ||
896		    old_copy->shadow_offset != (vm_offset_t) 0)
897			panic("vm_object_copy: copy/shadow inconsistency");
898
899		/*
900		 *	Make the old copy-object shadow the new one.
901		 *	It will receive no more pages from the original
902		 *	object.
903		 */
904
905		src_object->ref_count--;	/* remove ref. from old_copy */
906		old_copy->shadow = new_copy;
907		new_copy->ref_count++;		/* locking not needed - we
908						   have the only pointer */
909		vm_object_unlock(old_copy);	/* done with old_copy */
910	}
911
912	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
913	new_end   = (vm_offset_t) new_copy->size; /* for the whole object */
914
915	/*
916	 *	Point the new copy at the existing object.
917	 */
918
919	new_copy->shadow = src_object;
920	new_copy->shadow_offset = new_start;
921	src_object->ref_count++;
922	src_object->copy = new_copy;
923
924	/*
925	 *	Mark all the affected pages of the existing object
926	 *	copy-on-write.
927	 */
928	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
929		if ((new_start <= p->offset) && (p->offset < new_end))
930			p->flags |= PG_COPYONWRITE;
931
932	vm_object_unlock(src_object);
933
934	*dst_object = new_copy;
935	*dst_offset = src_offset - new_start;
936	*src_needs_copy = FALSE;
937}
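/*
 * Illustrative sketch, not part of the original source: the calling
 * pattern for vm_object_copy().  When the copy is deferred, the routine
 * returns the source object itself and sets *src_needs_copy, and the
 * caller must arrange for a shadow before allowing writes; the immediate
 * vm_object_shadow() call below is a simplification of what the map code
 * does lazily.  The variable names mirror the parameters above.
 */
#if 0
	vm_object_t	dst_object;
	vm_offset_t	dst_offset;
	boolean_t	src_needs_copy;

	vm_object_copy(src_object, src_offset, size,
			&dst_object, &dst_offset, &src_needs_copy);
	if (src_needs_copy) {
		/* give the prospective writer its own copy-on-write layer */
		vm_object_shadow(&dst_object, &dst_offset, size);
	}
#endif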
938
939/*
940 *	vm_object_shadow:
941 *
942 *	Create a new object which is backed by the
943 *	specified existing object range.  The source
944 *	object reference is deallocated.
945 *
946 *	The new object and offset into that object
947 *	are returned in the source parameters.
948 */
949
950void
951vm_object_shadow(object, offset, length)
952	vm_object_t	*object;	/* IN/OUT */
953	vm_offset_t	*offset;	/* IN/OUT */
954	vm_size_t	length;
955{
956	register vm_object_t	source;
957	register vm_object_t	result;
958
959	source = *object;
960
961	/*
962	 *	Allocate a new object with the given length
963	 */
964
965	if ((result = vm_object_allocate(length)) == NULL)
966		panic("vm_object_shadow: no object for shadowing");
967
968	/*
969	 *	The new object shadows the source object, adding
970	 *	a reference to it.  Our caller changes his reference
971	 *	to point to the new object, removing a reference to
972	 *	the source object.  Net result: no change of reference
973	 *	count.
974	 */
975	result->shadow = source;
976
977	/*
978	 *	Store the offset into the source object,
979	 *	and fix up the offset into the new object.
980	 */
981
982	result->shadow_offset = *offset;
983
984	/*
985	 *	Return the new things
986	 */
987
988	*offset = 0;
989	*object = result;
990}
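/*
 * Illustrative sketch, not part of the original source: typical use of
 * vm_object_shadow() when a map entry needs a private copy-on-write
 * layer.  The entry's reference simply moves from the old object to the
 * new shadow, so the net reference count of the old object is unchanged.
 * "entry_object", "entry_offset" and "entry_size" are hypothetical
 * stand-ins for the fields the map code keeps per entry.
 */
#if 0
	vm_object_t	entry_object;	/* current backing object */
	vm_offset_t	entry_offset;	/* entry's offset in that object */
	vm_size_t	entry_size;	/* size of the entry */

	vm_object_shadow(&entry_object, &entry_offset, entry_size);
	/* entry_object is now the shadow; entry_offset has been reset to 0 */
#endif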
991
992/*
993 *	Set the specified object's pager to the specified pager.
994 */
995
996void
997vm_object_setpager(object, pager, paging_offset,
998			read_only)
999	vm_object_t	object;
1000	vm_pager_t	pager;
1001	vm_offset_t	paging_offset;
1002	boolean_t	read_only;
1003{
1004#ifdef	lint
1005	read_only++;	/* No longer used */
 1006#endif	/* lint */
1007
1008	vm_object_lock(object);			/* XXX ? */
1009	if (object->pager && object->pager != pager) {
1010		panic("!!!pager already allocated!!!\n");
1011	}
1012	object->pager = pager;
1013	object->paging_offset = paging_offset;
1014	vm_object_unlock(object);			/* XXX ? */
1015}
1016
1017/*
1018 *	vm_object_hash hashes the pager/id pair.
1019 */
1020
1021#define vm_object_hash(pager) \
1022	(((unsigned)pager >> 5)%VM_OBJECT_HASH_COUNT)
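/*
 * Worked example, not part of the original source, using a made-up pager
 * address: a vm_pager_t at 0x00f04a60 hashes to
 *	((0xf04a60 >> 5) % 157) = (492115 % 157) = bucket 77.
 * The low five bits are shifted out first, presumably because malloc'ed
 * pager addresses are aligned and those bits carry little information.
 */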
1023
1024/*
1025 *	vm_object_lookup looks in the object cache for an object with the
1026 *	specified pager and paging id.
1027 */
1028
1029vm_object_t vm_object_lookup(pager)
1030	vm_pager_t	pager;
1031{
1032	register vm_object_hash_entry_t	entry;
1033	vm_object_t			object;
1034
1035	vm_object_cache_lock();
1036
1037	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
1038	     entry != NULL;
1039	     entry = entry->hash_links.tqe_next) {
1040		object = entry->object;
1041		if (object->pager == pager) {
1042			vm_object_lock(object);
1043			if (object->ref_count == 0) {
1044				TAILQ_REMOVE(&vm_object_cached_list, object,
1045					cached_list);
1046				vm_object_cached--;
1047			}
1048			object->ref_count++;
1049			vm_object_unlock(object);
1050			vm_object_cache_unlock();
1051			return(object);
1052		}
1053	}
1054
1055	vm_object_cache_unlock();
1056	return(NULL);
1057}
1058
1059/*
1060 *	vm_object_enter enters the specified object/pager/id into
1061 *	the hash table.
1062 */
1063
1064void vm_object_enter(object, pager)
1065	vm_object_t	object;
1066	vm_pager_t	pager;
1067{
1068	struct vm_object_hash_head	*bucket;
1069	register vm_object_hash_entry_t	entry;
1070
1071	/*
1072	 *	We don't cache null objects, and we can't cache
1073	 *	objects with the null pager.
1074	 */
1075
1076	if (object == NULL)
1077		return;
1078	if (pager == NULL)
1079		return;
1080
1081	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1082	entry = (vm_object_hash_entry_t)
1083		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
1084	entry->object = object;
1085	object->flags |= OBJ_CANPERSIST;
1086
1087	vm_object_cache_lock();
1088	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
1089	vm_object_cache_unlock();
1090}
1091
1092/*
1093 *	vm_object_remove:
1094 *
1095 *	Remove the pager from the hash table.
1096 *	Note:  This assumes that the object cache
1097 *	is locked.  XXX this should be fixed
1098 *	by reorganizing vm_object_deallocate.
1099 */
1100void
1101vm_object_remove(pager)
1102	register vm_pager_t	pager;
1103{
1104	struct vm_object_hash_head	*bucket;
1105	register vm_object_hash_entry_t	entry;
1106	register vm_object_t		object;
1107
1108	bucket = &vm_object_hashtable[vm_object_hash(pager)];
1109
1110	for (entry = bucket->tqh_first;
1111	     entry != NULL;
1112	     entry = entry->hash_links.tqe_next) {
1113		object = entry->object;
1114		if (object->pager == pager) {
1115			TAILQ_REMOVE(bucket, entry, hash_links);
1116			free((caddr_t)entry, M_VMOBJHASH);
1117			break;
1118		}
1119	}
1120}
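/*
 * Illustrative sketch, not part of the original source: the
 * lookup-then-enter pattern the pager layer is expected to use when
 * binding an object to a pager.  vm_object_lookup() revives the cached
 * object if the pager is already known; otherwise a fresh object is
 * allocated, entered into the hash table (which also marks it
 * OBJ_CANPERSIST), and handed the pager.  "size" is hypothetical.
 */
#if 0
	vm_object_t object;

	object = vm_object_lookup(pager);
	if (object == NULL) {
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
		vm_object_setpager(object, pager, (vm_offset_t) 0, FALSE);
	}
#endif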
1121
1122boolean_t	vm_object_collapse_allowed = TRUE;
1123/*
1124 *	vm_object_collapse:
1125 *
1126 *	Collapse an object with the object backing it.
1127 *	Pages in the backing object are moved into the
1128 *	parent, and the backing object is deallocated.
1129 *
1130 *	Requires that the object be locked and the page
1131 *	queues be unlocked.
1132 *
1133 *	This routine has significant changes by John S. Dyson
1134 *	to fix some swap memory leaks.  18 Dec 93
1135 *
1136 */
1137void
1138vm_object_collapse(object)
1139	register vm_object_t	object;
1140
1141{
1142	register vm_object_t	backing_object;
1143	register vm_offset_t	backing_offset;
1144	register vm_size_t	size;
1145	register vm_offset_t	new_offset;
1146	register vm_page_t	p, pp;
1147
1148	if (!vm_object_collapse_allowed)
1149		return;
1150
1151	while (TRUE) {
1152		/*
1153		 *	Verify that the conditions are right for collapse:
1154		 *
1155		 *	The object exists and no pages in it are currently
1156		 *	being paged out.
1157		 */
1158		if (object == NULL ||
1159		    object->paging_in_progress != 0)
1160			return;
1161
1162		/*
1163		 *		There is a backing object, and
1164		 */
1165
1166		if ((backing_object = object->shadow) == NULL)
1167			return;
1168
1169		vm_object_lock(backing_object);
1170		/*
1171		 *	...
1172		 *		The backing object is not read_only,
1173		 *		and no pages in the backing object are
1174		 *		currently being paged out.
1175		 *		The backing object is internal.
1176		 */
1177
1178		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
1179		    backing_object->paging_in_progress != 0) {
1180			vm_object_unlock(backing_object);
1181			return;
1182		}
1183
1184		/*
1185		 *	The backing object can't be a copy-object:
1186		 *	the shadow_offset for the copy-object must stay
1187		 *	as 0.  Furthermore (for the 'we have all the
1188		 *	pages' case), if we bypass backing_object and
1189		 *	just shadow the next object in the chain, old
1190		 *	pages from that object would then have to be copied
1191		 *	BOTH into the (former) backing_object and into the
1192		 *	parent object.
1193		 */
1194		if (backing_object->shadow != NULL &&
1195		    backing_object->shadow->copy == backing_object) {
1196			vm_object_unlock(backing_object);
1197			return;
1198		}
1199
1200		/*
1201		 * we can deal only with the swap pager
1202		 */
1203		if ((object->pager &&
1204		    	object->pager->pg_type != PG_SWAP) ||
1205		    (backing_object->pager &&
1206		    	backing_object->pager->pg_type != PG_SWAP)) {
1207			vm_object_unlock(backing_object);
1208			return;
1209		}
1210
1211
1212		/*
1213		 *	We know that we can either collapse the backing
1214		 *	object (if the parent is the only reference to
1215		 *	it) or (perhaps) remove the parent's reference
1216		 *	to it.
1217		 */
1218
1219		backing_offset = object->shadow_offset;
1220		size = object->size;
1221
1222		/*
1223		 *	If there is exactly one reference to the backing
1224		 *	object, we can collapse it into the parent.
1225		 */
1226
1227		if (backing_object->ref_count == 1) {
1228
1229			/*
1230			 *	We can collapse the backing object.
1231			 *
1232			 *	Move all in-memory pages from backing_object
1233			 *	to the parent.  Pages that have been paged out
1234			 *	will be overwritten by any of the parent's
1235			 *	pages that shadow them.
1236			 */
1237
 1238			while ((p = backing_object->memq.tqh_first) != NULL) {
1239
1240				new_offset = (p->offset - backing_offset);
1241
1242				/*
1243				 *	If the parent has a page here, or if
1244				 *	this page falls outside the parent,
1245				 *	dispose of it.
1246				 *
1247				 *	Otherwise, move it as planned.
1248				 */
1249
1250				if (p->offset < backing_offset ||
1251				    new_offset >= size) {
1252					vm_page_lock_queues();
1253					vm_page_free(p);
1254					vm_page_unlock_queues();
1255				} else {
1256				    pp = vm_page_lookup(object, new_offset);
1257				    if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
1258						object->paging_offset + new_offset))) {
1259					vm_page_lock_queues();
1260					vm_page_free(p);
1261					vm_page_unlock_queues();
1262				    } else {
1263					vm_page_rename(p, object, new_offset);
1264				    }
1265				}
1266			}
1267
1268			/*
1269			 *	Move the pager from backing_object to object.
1270			 */
1271
1272			if (backing_object->pager) {
1273				backing_object->paging_in_progress++;
1274				if (object->pager) {
1275					vm_pager_t bopager;
1276					object->paging_in_progress++;
1277					/*
1278					 * copy shadow object pages into ours
1279					 * and destroy unneeded pages in shadow object.
1280					 */
1281					bopager = backing_object->pager;
1282					backing_object->pager = NULL;
 1283					vm_object_remove(bopager);
1284					swap_pager_copy(
1285						bopager, backing_object->paging_offset,
1286						object->pager, object->paging_offset,
1287						object->shadow_offset);
1288					object->paging_in_progress--;
1289					if (object->paging_in_progress == 0)
1290						wakeup((caddr_t)object);
1291				} else {
1292					object->paging_in_progress++;
1293					/*
1294					 * grab the shadow objects pager
1295					 */
1296					object->pager = backing_object->pager;
1297					object->paging_offset = backing_object->paging_offset + backing_offset;
1298					vm_object_remove(backing_object->pager);
1299					backing_object->pager = NULL;
1300					/*
1301					 * free unnecessary blocks
1302					 */
1303					swap_pager_freespace(object->pager, 0, object->paging_offset);
1304					object->paging_in_progress--;
1305					if (object->paging_in_progress == 0)
1306						wakeup((caddr_t)object);
1307				}
1308				backing_object->paging_in_progress--;
1309				if (backing_object->paging_in_progress == 0)
1310					wakeup((caddr_t)backing_object);
1311			}
1312
1313
1314			/*
1315			 *	Object now shadows whatever backing_object did.
1316			 *	Note that the reference to backing_object->shadow
1317			 *	moves from within backing_object to within object.
1318			 */
1319
1320			object->shadow = backing_object->shadow;
1321			object->shadow_offset += backing_object->shadow_offset;
1322			if (object->shadow != NULL &&
1323			    object->shadow->copy != NULL) {
1324				panic("vm_object_collapse: we collapsed a copy-object!");
1325			}
1326			/*
1327			 *	Discard backing_object.
1328			 *
1329			 *	Since the backing object has no pages, no
1330			 *	pager left, and no object references within it,
1331			 *	all that is necessary is to dispose of it.
1332			 */
1333
1334			vm_object_unlock(backing_object);
1335
1336			simple_lock(&vm_object_list_lock);
1337			TAILQ_REMOVE(&vm_object_list, backing_object,
1338				object_list);
1339			vm_object_count--;
1340			simple_unlock(&vm_object_list_lock);
1341
1342			free((caddr_t)backing_object, M_VMOBJ);
1343
1344			object_collapses++;
1345		}
1346		else {
1347			/*
1348			 *	If all of the pages in the backing object are
1349			 *	shadowed by the parent object, the parent
1350			 *	object no longer has to shadow the backing
1351			 *	object; it can shadow the next one in the
1352			 *	chain.
1353			 *
1354			 *	The backing object must not be paged out - we'd
1355			 *	have to check all of the paged-out pages, as
1356			 *	well.
1357			 */
1358
1359			if (backing_object->pager != NULL) {
1360				vm_object_unlock(backing_object);
1361				return;
1362			}
1363
1364			/*
1365			 *	Should have a check for a 'small' number
1366			 *	of pages here.
1367			 */
1368
 1369			for (p = backing_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
1370				new_offset = (p->offset - backing_offset);
1371
1372				/*
1373				 *	If the parent has a page here, or if
1374				 *	this page falls outside the parent,
1375				 *	keep going.
1376				 *
1377				 *	Otherwise, the backing_object must be
1378				 *	left in the chain.
1379				 */
1380
1381				if (p->offset >= backing_offset &&
1382				    new_offset <= size &&
1383				    ((pp = vm_page_lookup(object, new_offset)) == NULL || (pp->flags & PG_FAKE)) &&
1384					(!object->pager || !vm_pager_has_page(object->pager, object->paging_offset+new_offset))) {
1385					/*
1386					 *	Page still needed.
1387					 *	Can't go any further.
1388					 */
1389					vm_object_unlock(backing_object);
1390					return;
1391				}
1392			}
1393
1394			/*
1395			 *	Make the parent shadow the next object
1396			 *	in the chain.  Deallocating backing_object
1397			 *	will not remove it, since its reference
1398			 *	count is at least 2.
1399			 */
1400
1401			vm_object_reference(object->shadow = backing_object->shadow);
1402			object->shadow_offset += backing_object->shadow_offset;
1403
1404			/*
1405			 *      Backing object might have had a copy pointer
1406			 *      to us.  If it did, clear it.
1407			 */
1408			if (backing_object->copy == object) {
1409				backing_object->copy = NULL;
1410			}
1411
1412			/*	Drop the reference count on backing_object.
1413			 *	Since its ref_count was at least 2, it
1414			 *	will not vanish; so we don't need to call
1415			 *	vm_object_deallocate.
1416			 */
1417			if (backing_object->ref_count == 1)
1418				printf("should have called obj deallocate\n");
1419			backing_object->ref_count--;
1420			vm_object_unlock(backing_object);
1421
1422			object_bypasses ++;
1423
1424		}
1425
1426		/*
1427		 *	Try again with this object's new backing object.
1428		 */
1429	}
1430}
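/*
 * Illustrative summary, not part of the original source, of the two
 * outcomes above for a chain in which "object" shadows "backing_object",
 * which in turn shadows some object "X":
 *
 *	before:			object -> backing_object -> X
 *	collapse (ref_count 1):	object -> X	(backing_object is destroyed;
 *						 its pages and pager are absorbed)
 *	bypass (ref_count > 1):	object -> X	(backing_object keeps existing
 *						 for its other references)
 */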
1431
1432/*
1433 *	vm_object_page_remove: [internal]
1434 *
1435 *	Removes all physical pages in the specified
1436 *	object range from the object's list of pages.
1437 *
1438 *	The object must be locked.
1439 */
1440void
1441vm_object_page_remove(object, start, end)
1442	register vm_object_t	object;
1443	register vm_offset_t	start;
1444	register vm_offset_t	end;
1445{
1446	register vm_page_t	p, next;
1447	vm_offset_t size;
1448	int cnt;
1449	int s;
1450
1451	if (object == NULL)
1452		return;
1453
1454	start = trunc_page(start);
1455	end = round_page(end);
1456again:
1457	size = end-start;
1458	if (size > 4*PAGE_SIZE || size >= object->size/4) {
1459		for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
1460			next = p->listq.tqe_next;
1461			if ((start <= p->offset) && (p->offset < end)) {
1462				s=splhigh();
1463				if (p->flags & PG_BUSY) {
1464					p->flags |= PG_WANTED;
1465					tsleep((caddr_t) p, PVM, "vmopar", 0);
1466					splx(s);
1467					goto again;
1468				}
1469				splx(s);
1470				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1471				vm_page_lock_queues();
1472				vm_page_free(p);
1473				vm_page_unlock_queues();
1474				size -= PAGE_SIZE;
1475			}
1476		}
1477	} else {
1478		while (size > 0) {
 1479			while ((p = vm_page_lookup(object, start)) != NULL) {
1480				s = splhigh();
1481				if (p->flags & PG_BUSY) {
1482					p->flags |= PG_WANTED;
1483					tsleep((caddr_t) p, PVM, "vmopar", 0);
1484					splx(s);
1485					goto again;
1486				}
1487				splx(s);
1488				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
1489				vm_page_lock_queues();
1490				vm_page_free(p);
1491				vm_page_unlock_queues();
1492			}
1493			start += PAGE_SIZE;
1494			size -= PAGE_SIZE;
1495		}
1496	}
1497}
1498
1499/*
1500 *	Routine:	vm_object_coalesce
1501 *	Function:	Coalesces two objects backing up adjoining
1502 *			regions of memory into a single object.
1503 *
1504 *	returns TRUE if objects were combined.
1505 *
1506 *	NOTE:	Only works at the moment if the second object is NULL -
1507 *		if it's not, which object do we lock first?
1508 *
1509 *	Parameters:
1510 *		prev_object	First object to coalesce
1511 *		prev_offset	Offset into prev_object
 1512 *		next_object	Second object to coalesce
1513 *		next_offset	Offset into next_object
1514 *
1515 *		prev_size	Size of reference to prev_object
1516 *		next_size	Size of reference to next_object
1517 *
1518 *	Conditions:
1519 *	The object must *not* be locked.
1520 */
1521boolean_t vm_object_coalesce(prev_object, next_object,
1522			prev_offset, next_offset,
1523			prev_size, next_size)
1524
1525	register vm_object_t	prev_object;
1526	vm_object_t	next_object;
1527	vm_offset_t	prev_offset, next_offset;
1528	vm_size_t	prev_size, next_size;
1529{
1530	vm_size_t	newsize;
1531
1532#ifdef	lint
1533	next_offset++;
1534#endif
1535
1536	if (next_object != NULL) {
1537		return(FALSE);
1538	}
1539
1540	if (prev_object == NULL) {
1541		return(TRUE);
1542	}
1543
1544	vm_object_lock(prev_object);
1545
1546	/*
1547	 *	Try to collapse the object first
1548	 */
1549	vm_object_collapse(prev_object);
1550
1551	/*
1552	 *	Can't coalesce if:
1553	 *	. more than one reference
1554	 *	. paged out
1555	 *	. shadows another object
1556	 *	. has a copy elsewhere
1557	 *	(any of which mean that the pages not mapped to
1558	 *	prev_entry may be in use anyway)
1559	 */
1560
1561	if (prev_object->ref_count > 1 ||
1562		prev_object->pager != NULL ||
1563		prev_object->shadow != NULL ||
1564		prev_object->copy != NULL) {
1565		vm_object_unlock(prev_object);
1566		return(FALSE);
1567	}
1568
1569	/*
1570	 *	Remove any pages that may still be in the object from
1571	 *	a previous deallocation.
1572	 */
1573
1574	vm_object_page_remove(prev_object,
1575			prev_offset + prev_size,
1576			prev_offset + prev_size + next_size);
1577
1578	/*
1579	 *	Extend the object if necessary.
1580	 */
1581	newsize = prev_offset + prev_size + next_size;
1582	if (newsize > prev_object->size)
1583		prev_object->size = newsize;
1584
1585	vm_object_unlock(prev_object);
1586	return(TRUE);
1587}
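/*
 * Illustrative sketch, not part of the original source: how the map code
 * can use vm_object_coalesce() when a new anonymous region is allocated
 * immediately after an existing one, so the existing object is grown
 * rather than allocating a second object.  The names are hypothetical
 * stand-ins for the previous entry's object, offset and size and the new
 * region's size.
 */
#if 0
	if (vm_object_coalesce(prev_object, NULL,
			prev_offset, (vm_offset_t) 0, prev_size, next_size)) {
		/*
		 * prev_object now covers the new range too; the new mapping
		 * can reference it at offset prev_offset + prev_size.
		 */
	}
#endif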
1588
1589/*
1590 * returns page after looking up in shadow chain
1591 */
1592
1593vm_page_t
1594vm_object_page_lookup(object, offset)
1595	vm_object_t object;
1596	vm_offset_t offset;
1597{
1598	vm_page_t m;
1599	if (!(m=vm_page_lookup(object, offset))) {
1600		if (!object->shadow)
1601			return 0;
1602		else
1603			return vm_object_page_lookup(object->shadow, offset + object->shadow_offset);
1604	}
1605	return m;
1606}
1607
1608#define DEBUG
1609#if defined(DEBUG) || (NDDB > 0)
1610/*
1611 *	vm_object_print:	[ debug ]
1612 */
1613void vm_object_print(object, full)
1614	vm_object_t	object;
1615	boolean_t	full;
1616{
1617	register vm_page_t	p;
 1618	extern int indent;
1619
1620	register int count;
1621
1622	if (object == NULL)
1623		return;
1624
1625	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
1626		(int) object, (int) object->size,
1627		object->resident_page_count, object->ref_count);
1628	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
1629	       (int) object->pager, (int) object->paging_offset,
1630	       (int) object->shadow, (int) object->shadow_offset);
1631	printf("cache: next=0x%x, prev=0x%x\n",
1632	       object->cached_list.tqe_next, object->cached_list.tqe_prev);
1633
1634	if (!full)
1635		return;
1636
1637	indent += 2;
1638	count = 0;
1639	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
1640		if (count == 0)
1641			iprintf("memory:=");
1642		else if (count == 6) {
1643			printf("\n");
1644			iprintf(" ...");
1645			count = 0;
1646		} else
1647			printf(",");
1648		count++;
1649
1650		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
1651	}
1652	if (count != 0)
1653		printf("\n");
1654	indent -= 2;
1655}
1656#endif /* defined(DEBUG) || (NDDB > 0) */
1657