/*	$NetBSD: uvm_loan.c,v 1.12 1998/11/04 07:07:22 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
 */

/*
 * uvm_loan.c: page loanout handler
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
 * from the VM system to other parts of the kernel.   this allows page
 * copying to be avoided (e.g. you can loan pages from objs/anons to
 * the mbuf system).
 *
 * there are 3 types of loans possible:
 *  O->K  uvm_object page to wired kernel page (e.g. mbuf data area)
 *  A->K  anon page to wired kernel page (e.g. mbuf data area)
 *  O->A  uvm_object to anon loan (e.g. vnode page to an anon)
 * note that it is possible to have an O page loaned to both an A and K
 * at the same time.
 *
 * loans are tracked by pg->loan_count.  an O->A page will have both
 * a uvm_object and a vm_anon, but PQ_ANON will not be set.   this sort
 * of page is considered "owned" by the uvm_object (not the anon).
 *
 * each loan of a page to a wired kernel page bumps the pg->wire_count.
 * wired kernel mappings should be entered with pmap_kenter functions
 * so that pmap_page_protect() will not affect the kernel mappings.
 * (this requires the PMAP_NEW interface...).
 *
 * owners that want to free their pages and discover that they are
 * loaned out simply "disown" them (the page becomes an orphan).  these
 * pages should be freed when the last loan is dropped.   in some cases
 * an anon may "adopt" an orphaned page.
 *
 * locking: to read pg->loan_count either the owner or the page queues
 * must be locked.   to modify pg->loan_count, both the owner of the page
 * and the PQs must be locked.   pg->flags is (as always) locked by
 * the owner of the page.
 *
 * note that locking from the "loaned" side is tricky since the object
 * getting the loaned page has no reference to the page's owner and thus
 * the owner could "die" at any time.   in order to prevent the owner
 * from dying the page queues should be locked.   this forces us to sometimes
 * use "try" locking.
 *
 * loans are typically broken by the following events:
 *  1. write fault to a loaned page
 *  2. pageout of clean+inactive O->A loaned page
 *  3. owner frees page (e.g. pager flush)
 *
 * note that loaning a page causes all mappings of the page to become
 * read-only (via pmap_page_protect).   this could have an unexpected
 * effect on normal "wired" pages if one is not careful.
 */
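
/*
 * as an illustration of the locking rules above: bumping the loan count
 * on an object-owned page looks roughly like this (a sketch only; the
 * real sequences live in the loanout functions below):
 *
 *	simple_lock(&uobj->vmobjlock);		(lock the page's owner)
 *	uvm_lock_pageq();			(lock the page queues)
 *	if (pg->loan_count == 0)		(first loan? revoke write access)
 *		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
 *	pg->loan_count++;
 *	uvm_pagewire(pg);			(O->K loans also wire the page)
 *	uvm_unlock_pageq();
 *	simple_unlock(&uobj->vmobjlock);
 */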

/*
 * local prototypes
 */

static int	uvm_loananon __P((struct uvm_faultinfo *, void ***,
				int, struct vm_anon *));
static int	uvm_loanentry __P((struct uvm_faultinfo *, void ***, int));
static int	uvm_loanuobj __P((struct uvm_faultinfo *, void ***,
				int, vaddr_t));
static int	uvm_loanzero __P((struct uvm_faultinfo *, void ***, int));

/*
 * inlines
 */

/*
 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
 *
 * => "ufi" is the result of a successful map lookup (meaning that
 *	the maps are locked by the caller)
 * => we may unlock the maps if needed (for I/O)
 * => we put our output result in "output"
 * => we return the number of pages we loaned, or -1 if we had an error
 */

static __inline int
uvm_loanentry(ufi, output, flags)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
{
	vaddr_t curaddr = ufi->orig_rvaddr;
	vsize_t togo = ufi->size;
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;

	/*
	 * lock us the rest of the way down
	 */
	if (aref->ar_amap)
		simple_lock(&aref->ar_amap->am_l);
	if (uobj)
		simple_lock(&uobj->vmobjlock);

	/*
	 * loop until done
	 */
	while (togo) {

		/*
		 * find the page we want.   check the anon layer first.
		 */

		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			rv = -1;		/* null map entry... fail now */
		}

		/* total failure */
		if (rv < 0)
			return(-1);

		/* relock failed, need to do another lookup */
		if (rv == 0)
			return(result);

		/*
		 * got it... advance to next page
		 */
		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock everything and return
	 */
	uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
	return(result);
}

/*
 * normal functions
 */

/*
 * uvm_loan: loan pages out to anons or to the kernel
 *
 * => map should be unlocked
 * => start and len should be multiples of PAGE_SIZE
 * => result is either an array of anons or vm_pages (depending on flags)
 * => flag values: UVM_LOAN_TOANON - loan to anons
 *                 UVM_LOAN_TOPAGE - loan to wired kernel page
 *    one and only one of these flags must be set!
 */
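
/*
 * example: a hypothetical caller loaning a range to wired kernel pages
 * (a usage sketch only, not part of this file):
 *
 *	struct vm_page *pgs[4];		(one slot per page in the range)
 *	int error;
 *
 *	error = uvm_loan(&curproc->p_vmspace->vm_map, start, 4 * PAGE_SIZE,
 *	    (void **)pgs, UVM_LOAN_TOPAGE);
 *	if (error == KERN_SUCCESS) {
 *		... use the wired pages (e.g. pmap_kenter them) ...
 *		uvm_unloanpage(pgs, 4);		(drop the loans when done)
 *	}
 *
 * with UVM_LOAN_TOANON the result array instead holds "struct vm_anon *"
 * entries, and uvm_unloananon() drops those loans.
 */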

int
uvm_loan(map, start, len, result, flags)
	struct vm_map *map;
	vaddr_t start;
	vsize_t len;
	void **result;
	int flags;
{
	struct uvm_faultinfo ufi;
	void **output;
	int rv;

	/*
	 * ensure that one and only one of the flags is set
	 */

	if ((flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) ==
	    (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE) ||
	    (flags & (UVM_LOAN_TOANON|UVM_LOAN_TOPAGE)) == 0)
		return(KERN_FAILURE);

	/*
	 * "output" is a pointer to the current place to put the loaned
	 * page...
	 */

	output = &result[0];	/* start at the beginning ... */

	/*
	 * while we've got pages to do
	 */

	while (len > 0) {

		/*
		 * fill in params for a call to uvmfault_lookup
		 */

		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/*
		 * do the lookup, the only time this will fail is if we hit on
		 * an unmapped region (an error)
		 */

		if (!uvmfault_lookup(&ufi, FALSE))
			goto fail;

		/*
		 * now do the loanout
		 */
		rv = uvm_loanentry(&ufi, &output, flags);
		if (rv < 0)
			goto fail;

		/*
		 * done!   advance pointers and unlock.
		 */
		rv <<= PAGE_SHIFT;
		len -= rv;
		start += rv;
		uvmfault_unlockmaps(&ufi, FALSE);
	}

	/*
	 * got it!   return success.
	 */

	return(KERN_SUCCESS);

fail:
	/*
	 * fail: failed to do it.   drop our loans and return failure code.
	 */
	if (output - result) {
		if (flags & UVM_LOAN_TOANON)
			uvm_unloananon((struct vm_anon **)result,
			    output - result);
		else
			uvm_unloanpage((struct vm_page **)result,
			    output - result);
	}
	return(KERN_FAILURE);
}

/*
 * uvm_loananon: loan a page from an anon out
 *
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

int
uvm_loananon(ufi, output, flags, anon)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	struct vm_anon *anon;
{
	struct vm_page *pg;
	int result;

	/*
	 * if we are loaning to another anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it.
	 */
	if (flags & UVM_LOAN_TOANON) {
		simple_lock(&anon->an_lock);
		pg = anon->u.an_page;
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1)
			/* read protect it */
			pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
		anon->an_ref++;
		**output = anon;
		*output = (*output) + 1;
		simple_unlock(&anon->an_lock);
		return(1);
	}

	/*
	 * we are loaning to a kernel-page.   we need to get the page
	 * resident so we can wire it.   uvmfault_anonget will handle
	 * this for us.
	 */

	simple_lock(&anon->an_lock);
	result = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (result != VM_PAGER_OK) {

		/* need to refault (i.e. refresh our lookup) ? */
		if (result == VM_PAGER_REFAULT)
			return(0);

		/* "try again"?   sleep a bit and retry ... */
		if (result == VM_PAGER_AGAIN) {
			tsleep((caddr_t)&lbolt, PVM, "loanagain", 0);
			return(0);
		}

		/* otherwise flag it as an error */
		return(-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->u.an_page;
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
	pg->loan_count++;
	uvm_pagewire(pg);	/* always wire it */
	uvm_unlock_pageq();
	**output = pg;
	*output = (*output) + 1;

	/* unlock anon and return success */
	if (pg->uobject)
		simple_unlock(&pg->uobject->vmobjlock);
	simple_unlock(&anon->an_lock);
	return(1);
}

/*
 * uvm_loanuobj: loan a page from a uobj out
 *
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

int
uvm_loanuobj(ufi, output, flags, va)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	vaddr_t va;
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int result, npages;
	boolean_t locked;

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	if (uobj->pgops->pgo_get) {
		npages = 1;
		pg = NULL;
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		result = VM_PAGER_ERROR;
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (result != VM_PAGER_OK && result != VM_PAGER_UNLOCK) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (result == VM_PAGER_UNLOCK) {
		uvmfault_unlockall(ufi, amap, NULL, NULL);

		npages = 1;
		/* locked: uobj */
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, 0);
		/* locked: <nothing> */

		/*
		 * check for errors
		 */

		if (result != VM_PAGER_OK) {
			if (result == VM_PAGER_AGAIN) {
				tsleep((caddr_t)&lbolt, PVM, "fltagain2", 0);
				return(0); /* redo the lookup and try again */
			}
			return(-1);	/* total failure */
		}

		/*
		 * pgo_get was a success.   attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			simple_lock(&amap->am_l);
		simple_lock(&uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that the amap slot is still free.   if there is a problem we
		 * drop our lock (thus force a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {

			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == FALSE) {

			if (pg->flags & PG_WANTED)
				/* still holding object lock */
				thread_wakeup(pg);

			if (pg->flags & PG_RELEASED) {
#ifdef DIAGNOSTIC
				if (uobj->pgops->pgo_releasepg == NULL)
			panic("uvm_loanuobj: object has no releasepg function");
#endif
				/* frees page */
				if (uobj->pgops->pgo_releasepg(pg, NULL))
					simple_unlock(&uobj->vmobjlock);
				return (0);
			}

			uvm_lock_pageq();
			uvm_pageactivate(pg); /* make sure it is in queues */
			uvm_unlock_pageq();
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			simple_unlock(&uobj->vmobjlock);
			return (0);
		}
	}

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for us
	 * and we have all data structures locked.   do the loanout.   page can
	 * not be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loan to wired-kernel page? */
		uvm_lock_pageq();
		if (pg->loan_count == 0)
			pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
		pg->loan_count++;
		uvm_pagewire(pg);
		uvm_unlock_pageq();
		**output = pg;
		*output = (*output) + 1;
		if (pg->flags & PG_WANTED)
			thread_wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);		/* got it! */
	}

	/*
	 * must be a loan to an anon.   check to see if there is already
	 * an anon associated with this page.  if so, then just add a
	 * reference to that anon and return it.   the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		simple_lock(&anon->an_lock);
		anon->an_ref++;
		simple_unlock(&anon->an_lock);
		**output = anon;
		*output = (*output) + 1;
		uvm_lock_pageq();
		uvm_pageactivate(pg);	/* reactivate */
		uvm_unlock_pageq();
		if (pg->flags & PG_WANTED)
			thread_wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {		/* out of VM! */
		if (pg->flags & PG_WANTED)
			thread_wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}
	anon->u.an_page = pg;
	pg->uanon = anon;
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
	pg->loan_count++;
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	**output = anon;
	*output = (*output) + 1;
	if (pg->flags & PG_WANTED)
		thread_wakeup(pg);
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	return(1);
}

/*
 * uvm_loanzero: "loan" a zero-fill page out
 *
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

int
uvm_loanzero(ufi, output, flags)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
{
	struct vm_anon *anon;
	struct vm_page *pg;

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */

		while ((pg = uvm_pagealloc(NULL, 0, NULL)) == NULL) {
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    ufi->entry->object.uvm_obj, NULL);
			uvm_wait("loanzero1");
			if (!uvmfault_relock(ufi))
				return(0);
			if (ufi->entry->aref.ar_amap)
				simple_lock(&ufi->entry->aref.ar_amap->am_l);
			if (ufi->entry->object.uvm_obj)
				simple_lock(
				    &ufi->entry->object.uvm_obj->vmobjlock);
			/* ... and try again */
		}

		/* got a page, zero it and return */
		uvm_pagezero(pg);		/* clears PG_CLEAN */
		pg->flags &= ~(PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
		**output = pg;
		*output = (*output) + 1;
		uvm_lock_pageq();
		/* wire it as we are loaning to kernel-page */
		uvm_pagewire(pg);
		pg->loan_count = 1;
		uvm_unlock_pageq();
		return(1);
	}

	/* loaning to an anon */
	while ((anon = uvm_analloc()) == NULL ||
	    (pg = uvm_pagealloc(NULL, 0, anon)) == NULL) {

		/* unlock everything */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
		       ufi->entry->object.uvm_obj, NULL);

		/* out of swap causes us to fail */
		if (anon == NULL)
			return(-1);

		uvm_anfree(anon);
		uvm_wait("loanzero2");		/* wait for pagedaemon */

		if (!uvmfault_relock(ufi))
			/* map changed while unlocked, need relookup */
			return (0);

		/* relock everything else */
		if (ufi->entry->aref.ar_amap)
			simple_lock(&ufi->entry->aref.ar_amap->am_l);
		if (ufi->entry->object.uvm_obj)
			simple_lock(&ufi->entry->object.uvm_obj->vmobjlock);
		/* ... and try again */
	}

	/* got a page, zero it and return */
	uvm_pagezero(pg);		/* clears PG_CLEAN */
	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);
	uvm_lock_pageq();
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	**output = anon;
	*output = (*output) + 1;
	return(1);
}


/*
 * uvm_unloananon: kill loans on anons (basically a normal ref drop)
 *
 * => we expect all our resources to be unlocked
 */

void
uvm_unloananon(aloans, nanons)
	struct vm_anon **aloans;
	int nanons;
{
	struct vm_anon *anon;

	while (nanons-- > 0) {
		int refs;

		anon = *aloans++;
		simple_lock(&anon->an_lock);
		refs = --anon->an_ref;
		simple_unlock(&anon->an_lock);

		if (refs == 0) {
			uvm_anfree(anon);	/* last reference: kill anon */
		}
	}
}

/*
 * uvm_unloanpage: kill loans on pages loaned out to the kernel
 *
 * => we expect all our resources to be unlocked
 */

void
uvm_unloanpage(ploans, npages)
	struct vm_page **ploans;
	int npages;
{
	struct vm_page *pg;

	uvm_lock_pageq();

	while (npages-- > 0) {
		pg = *ploans++;

		if (pg->loan_count < 1)
			panic("uvm_unloanpage: page %p isn't loaned", pg);

		pg->loan_count--;		/* drop loan */
		uvm_pageunwire(pg);		/* and unwire it */

		/*
		 * if page is unowned and we killed last loan, then we can
		 * free it
		 */
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {

			if (pg->flags & PG_BUSY)
	panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg);

			/* be safe */
			pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
			uvm_pagefree(pg);	/* pageq locked above */

		}
	}

	uvm_unlock_pageq();
}