uvm_loan.c revision 1.33
1/*	$NetBSD: uvm_loan.c,v 1.33 2001/09/22 05:58:04 jdolecek Exp $	*/
2
3/*
4 *
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Charles D. Cranor and
19 *      Washington University.
20 * 4. The name of the author may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
35 */
36
37/*
38 * uvm_loan.c: page loanout handler
39 */
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/kernel.h>
44#include <sys/proc.h>
45#include <sys/malloc.h>
46#include <sys/mman.h>
47
48#include <uvm/uvm.h>
49
50/*
51 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
52 * from the VM system to other parts of the kernel.   this allows page
53 * copying to be avoided (e.g. you can loan pages from objs/anons to
54 * the mbuf system).
55 *
56 * there are 3 types of loans possible:
57 *  O->K  uvm_object page to wired kernel page (e.g. mbuf data area)
58 *  A->K  anon page to wired kernel page (e.g. mbuf data area)
59 *  O->A  uvm_object to anon loan (e.g. vnode page to an anon)
 * note that it is possible to have an O page loaned to both an A and K
61 * at the same time.
62 *
63 * loans are tracked by pg->loan_count.  an O->A page will have both
64 * a uvm_object and a vm_anon, but PQ_ANON will not be set.   this sort
65 * of page is considered "owned" by the uvm_object (not the anon).
66 *
67 * each loan of a page to the kernel bumps the pg->wire_count.  the
68 * kernel mappings for these pages will be read-only and wired.  since
69 * the page will also be wired, it will not be a candidate for pageout,
70 * and thus will never be pmap_page_protect()'d with VM_PROT_NONE.  a
71 * write fault in the kernel to one of these pages will not cause
72 * copy-on-write.  instead, the page fault is considered fatal.  this
73 * is because the kernel mapping will have no way to look up the
74 * object/anon which the page is owned by.  this is a good side-effect,
75 * since a kernel write to a loaned page is an error.
76 *
77 * owners that want to free their pages and discover that they are
78 * loaned out simply "disown" them (the page becomes an orphan).  these
79 * pages should be freed when the last loan is dropped.   in some cases
80 * an anon may "adopt" an orphaned page.
81 *
82 * locking: to read pg->loan_count either the owner or the page queues
83 * must be locked.   to modify pg->loan_count, both the owner of the page
84 * and the PQs must be locked.   pg->flags is (as always) locked by
85 * the owner of the page.
86 *
87 * note that locking from the "loaned" side is tricky since the object
88 * getting the loaned page has no reference to the page's owner and thus
89 * the owner could "die" at any time.   in order to prevent the owner
90 * from dying the page queues should be locked.   this forces us to sometimes
91 * use "try" locking.
92 *
93 * loans are typically broken by the following events:
 *  1. user-level write fault to a loaned page
95 *  2. pageout of clean+inactive O->A loaned page
96 *  3. owner frees page (e.g. pager flush)
97 *
98 * note that loaning a page causes all mappings of the page to become
99 * read-only (via pmap_page_protect).   this could have an unexpected
100 * effect on normal "wired" pages if one is not careful (XXX).
101 */
102
103/*
104 * local prototypes
105 */
106
107static int	uvm_loananon __P((struct uvm_faultinfo *, void ***,
108				int, struct vm_anon *));
109static int	uvm_loanentry __P((struct uvm_faultinfo *, void ***, int));
110static int	uvm_loanuobj __P((struct uvm_faultinfo *, void ***,
111				int, vaddr_t));
112static int	uvm_loanzero __P((struct uvm_faultinfo *, void ***, int));
113static void	uvm_unloananon __P((struct vm_anon **, int, int));
114static void	uvm_unloanpage __P((struct vm_page **, int, int));
115
116
117/*
118 * inlines
119 */
120
121/*
122 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
123 *
124 * => "ufi" is the result of a successful map lookup (meaning that
125 *	on entry the map is locked by the caller)
126 * => we may unlock and then relock the map if needed (for I/O)
127 * => we put our output result in "output"
128 * => we always return with the map unlocked
129 * => possible return values:
130 *	-1 == error, map is unlocked
131 *	 0 == map relock error (try again!), map is unlocked
132 *	>0 == number of pages we loaned, map is unlocked
133 */
134
static __inline int
uvm_loanentry(ufi, output, flags)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
{
	vaddr_t curaddr = ufi->orig_rvaddr;	/* address of page being loaned */
	vsize_t togo = ufi->size;		/* bytes remaining in this entry */
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;			/* result == pages loaned so far */

	/*
	 * lock us the rest of the way down (we unlock before return)
	 */
	if (aref->ar_amap)
		amap_lock(aref->ar_amap);
	if (uobj)
		simple_lock(&uobj->vmobjlock);

	/*
	 * loop until done
	 */
	while (togo) {

		/*
		 * find the page we want.   check the anon layer first.
		 */

		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		/* locked: map, amap, uobj */
		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			/* no backing object/anon: hand out a zero-fill page */
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			rv = -1;	/* null map entry... fail now */
		}
		/* locked: if (rv > 0) => map, amap, uobj  [o.w. unlocked] */

		/* total failure (the helper already unlocked everything) */
		if (rv < 0)
			return(-1);		/* everything unlocked */

		/* relock failed, need to do another lookup */
		if (rv == 0)
			return(result);		/* everything unlocked */

		/*
		 * got it... advance to next page
		 */
		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock what we locked, unlock the maps and return
	 */
	if (aref->ar_amap)
		amap_unlock(aref->ar_amap);
	if (uobj)
		simple_unlock(&uobj->vmobjlock);
	uvmfault_unlockmaps(ufi, FALSE);
	return(result);
}
209
210/*
211 * normal functions
212 */
213
214/*
215 * uvm_loan: loan pages in a map out to anons or to the kernel
216 *
217 * => map should be unlocked
218 * => start and len should be multiples of PAGE_SIZE
219 * => result is either an array of anon's or vm_pages (depending on flags)
220 * => flag values: UVM_LOAN_TOANON - loan to anons
221 *                 UVM_LOAN_TOPAGE - loan to wired kernel page
222 *    one and only one of these flags must be set!
223 * => returns 0 (success), or an appropriate error number
224 */
225
int
uvm_loan(map, start, len, result, flags)
	struct vm_map *map;
	vaddr_t start;
	vsize_t len;
	void **result;
	int flags;
{
	struct uvm_faultinfo ufi;
	void **output;
	int rv, error;

	/*
	 * ensure that one and only one of the flags is set
	 */

	KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
		((flags & UVM_LOAN_TOPAGE) == 0));
	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * "output" is a pointer to the current place to put the loaned
	 * page...
	 */

	output = &result[0];	/* start at the beginning ... */

	/*
	 * while we've got pages to do
	 */

	while (len > 0) {

		/*
		 * fill in params for a call to uvmfault_lookup
		 */

		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/*
		 * do the lookup, the only time this will fail is if we hit on
		 * an unmapped region (an error)
		 */

		if (!uvmfault_lookup(&ufi, FALSE)) {
			error = ENOENT;
			goto fail;
		}

		/*
		 * map now locked.  now do the loanout...
		 * uvm_loanentry always returns with the map unlocked:
		 * <0 = error, 0 = relock failed (retry lookup), >0 = pages done.
		 */
		rv = uvm_loanentry(&ufi, &output, flags);
		if (rv < 0) {
			/* all unlocked due to error */
			error = EINVAL;
			goto fail;
		}

		/*
		 * done!  the map is unlocked.  advance, if possible.
		 *
		 * XXXCDC: could be recoded to hold the map lock with
		 *	   smarter code (but it only happens on map entry
		 *	   boundaries, so it isn't that bad).
		 */
		if (rv) {
			rv <<= PAGE_SHIFT;	/* pages loaned -> bytes */
			len -= rv;
			start += rv;
		}
		/* rv == 0: loop again and redo the lookup */
	}

	/*
	 * got it!   return success.
	 */

	return 0;

fail:
	/*
	 * fail: failed to do it.   drop our loans and return failure code.
	 * map is already unlocked.
	 * (output - result) is the count of pages/anons already loaned.
	 */
	if (output - result) {
		if (flags & UVM_LOAN_TOANON)
			uvm_unloananon((struct vm_anon **)result,
			    output - result, flags & UVM_LOAN_WIRED);
		else
			uvm_unloanpage((struct vm_page **)result,
			    output - result, flags & UVM_LOAN_WIRED);
	}
	return (error);
}
322
323/*
324 * uvm_loananon: loan a page from an anon out
325 *
326 * => called with map, amap, uobj locked
327 * => return value:
328 *	-1 = fatal error, everything is unlocked, abort.
329 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
330 *		try again
331 *	 1 = got it, everything still locked
332 */
333
int
uvm_loananon(ufi, output, flags, anon)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	struct vm_anon *anon;
{
	struct vm_page *pg;
	int result;

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */
	if (flags & UVM_LOAN_TOANON) {
		simple_lock(&anon->an_lock);
		pg = anon->u.an_page;
		/* if (in RAM) and (owned by this anon) and (only 1 ref) */
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1)
			/* write-protect it so the sharing stays copy-on-write */
			pmap_page_protect(pg, VM_PROT_READ);
		anon->an_ref++;
		**output = anon;
		*output = (*output) + 1;
		simple_unlock(&anon->an_lock);
		return(1);
	}

	/*
	 * we are loaning to a kernel-page.   we need to get the page
	 * resident so we can wire it.   uvmfault_anonget will handle
	 * this for us.
	 */

	simple_lock(&anon->an_lock);
	result = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */
	if (result != 0) {

		/* need to refault (i.e. refresh our lookup) ? */
		if (result == ERESTART)
			return(0);

		/* "try again"?   sleep a bit and retry ... */
		if (result == EAGAIN) {
			tsleep(&lbolt, PVM, "loanagain", 0);
			return(0);
		}

		/* otherwise flag it as an error */
		return(-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->u.an_page;
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		/* first loan: revoke write access from all existing mappings */
		pmap_page_protect(pg, VM_PROT_READ);
	pg->loan_count++;
	/* If requested, wire */
	if (flags & UVM_LOAN_WIRED)
		uvm_pagewire(pg);
	uvm_unlock_pageq();
	**output = pg;
	*output = (*output) + 1;

	/* unlock anon and return success */
	if (pg->uobject)	/* XXXCDC: what if this is our uobj? bad */
		simple_unlock(&pg->uobject->vmobjlock);
	simple_unlock(&anon->an_lock);
	return(1);
}
414
415/*
416 * uvm_loanuobj: loan a page from a uobj out
417 *
418 * => called with map, amap, uobj locked
419 * => return value:
420 *	-1 = fatal error, everything is unlocked, abort.
421 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
422 *		try again
423 *	 1 = got it, everything still locked
424 */
425
static int
uvm_loanuobj(ufi, output, flags, va)
	struct uvm_faultinfo *ufi;
	void ***output;
	int flags;
	vaddr_t va;
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int result, npages;
	boolean_t locked;

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		result = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (result != 0 && result != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (result == EBUSY) {
		/* drop everything but the uobj lock, then do sync I/O */
		uvmfault_unlockall(ufi, amap, NULL, NULL);
		npages = 1;
		/* locked: uobj */
		result = uobj->pgops->pgo_get(uobj, va - ufi->entry->start,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		/*
		 * check for errors
		 */

		if (result != 0) {
			 if (result == EAGAIN) {
				tsleep(&lbolt, PVM, "fltagain2", 0);
				return(0); /* redo the lookup and try again */
			}
			return(-1);	/* total failure */
		}

		/*
		 * pgo_get was a success.   attempt to relock everything.
		 * (the uobj lock is always retaken so we can examine pg.)
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap);
		simple_lock(&uobj->vmobjlock);

		/*
		 * verify that the page has not be released and re-verify
		 * that amap slot is still free.   if there is a problem we
		 * drop our lock (thus force a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = FALSE;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == FALSE) {
			if (pg->flags & PG_WANTED) {
				/* still holding object lock */
				wakeup(pg);
			}
			if (pg->flags & PG_RELEASED) {
				/*
				 * NOTE(review): pg is freed here without the
				 * page queues locked, unlike the other
				 * uvm_pagefree() call sites in this file --
				 * confirm uvm_pagefree's locking contract.
				 */
				uvm_pagefree(pg);
				return (0);
			}
			/* put the page back on a queue so it can be paged */
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			simple_unlock(&uobj->vmobjlock);
			return (0);
		}
	}

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for us
	 * and we have all data structures locked.   do the loanout.   page can
	 * not be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) { /* loan to wired-kernel page? */
		uvm_lock_pageq();
		if (pg->loan_count == 0)
			/* first loan: revoke write access from all mappings */
			pmap_page_protect(pg, VM_PROT_READ);
		pg->loan_count++;
		/* If requested, wire */
		if (flags & UVM_LOAN_WIRED)
			uvm_pagewire(pg);
		uvm_unlock_pageq();
		**output = pg;
		*output = (*output) + 1;
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);		/* got it! */
	}

	/*
	 * must be a loan to an anon.   check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to this object.   the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		simple_lock(&anon->an_lock);
		anon->an_ref++;
		simple_unlock(&anon->an_lock);
		**output = anon;
		*output = (*output) + 1;
		uvm_lock_pageq();
		uvm_pageactivate(pg);	/* reactivate */
		uvm_unlock_pageq();
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		return(1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {		/* out of VM! */
		/* undo the PG_BUSY we hold on pg, then fail hard */
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return(-1);
	}
	/* anon is locked! */
	anon->u.an_page = pg;
	pg->uanon = anon;	/* O->A loan: page keeps uobject, gains uanon */
	uvm_lock_pageq();
	if (pg->loan_count == 0)
		pmap_page_protect(pg, VM_PROT_READ);
	pg->loan_count++;
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	**output = anon;
	*output = (*output) + 1;
	if (pg->flags & PG_WANTED)
		wakeup(pg);
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	simple_unlock(&anon->an_lock);
	return(1);
}
613
614/*
615 * uvm_loanzero: "loan" a zero-fill page out
616 *
617 * => called with map, amap, uobj locked
618 * => return value:
619 *	-1 = fatal error, everything is unlocked, abort.
620 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
621 *		try again
622 *	 1 = got it, everything still locked
623 */
624
625static int
626uvm_loanzero(ufi, output, flags)
627	struct uvm_faultinfo *ufi;
628	void ***output;
629	int flags;
630{
631	struct vm_anon *anon;
632	struct vm_page *pg;
633
634	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
635
636		while ((pg = uvm_pagealloc(NULL, 0, NULL,
637		    UVM_PGA_ZERO)) == NULL) {
638			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
639			    ufi->entry->object.uvm_obj, NULL);
640			uvm_wait("loanzero1");
641			if (!uvmfault_relock(ufi))
642				return(0);
643			if (ufi->entry->aref.ar_amap)
644				amap_lock(ufi->entry->aref.ar_amap);
645			if (ufi->entry->object.uvm_obj)
646				simple_lock(
647				    &ufi->entry->object.uvm_obj->vmobjlock);
648			/* ... and try again */
649		}
650
651		/* got a zero'd page; return */
652		pg->flags &= ~(PG_BUSY|PG_FAKE);
653		UVM_PAGE_OWN(pg, NULL);
654		**output = pg;
655		*output = (*output) + 1;
656		uvm_lock_pageq();
657		/* If requested, wire */
658		if (flags & UVM_LOAN_WIRED)
659			uvm_pagewire(pg);
660		pg->loan_count = 1;
661		uvm_unlock_pageq();
662		return(1);
663	}
664
665	/* loaning to an anon */
666	while ((anon = uvm_analloc()) == NULL ||
667	    (pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
668
669		/* unlock everything */
670		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
671		       ufi->entry->object.uvm_obj, anon);
672
673		/* out of swap causes us to fail */
674		if (anon == NULL)
675			return(-1);
676
677		/*
678		 * drop our reference; we're the only one,
679		 * so it's okay that the anon isn't locked
680		 * here.
681		 */
682		anon->an_ref--;
683
684		uvm_anfree(anon);
685		uvm_wait("loanzero2");		/* wait for pagedaemon */
686
687		if (!uvmfault_relock(ufi))
688			/* map changed while unlocked, need relookup */
689			return (0);
690
691		/* relock everything else */
692		if (ufi->entry->aref.ar_amap)
693			amap_lock(ufi->entry->aref.ar_amap);
694		if (ufi->entry->object.uvm_obj)
695			simple_lock(&ufi->entry->object.uvm_obj->vmobjlock);
696		/* ... and try again */
697	}
698
699	/* got a zero'd page; return */
700	pg->flags &= ~(PG_BUSY|PG_FAKE);
701	UVM_PAGE_OWN(pg, NULL);
702	uvm_lock_pageq();
703	uvm_pageactivate(pg);
704	uvm_unlock_pageq();
705	**output = anon;
706	*output = (*output) + 1;
707	return(1);
708}
709
710
711/*
712 * uvm_unloananon: kill loans on anons (basically a normal ref drop)
713 *
714 * => we expect all our resources to be unlocked
715 */
716
717static void
718uvm_unloananon(aloans, nanons, wired)
719	struct vm_anon **aloans;
720	int nanons, wired;
721{
722	struct vm_anon *anon;
723
724	while (nanons-- > 0) {
725		int refs;
726
727		anon = *aloans++;
728		simple_lock(&anon->an_lock);
729		refs = --anon->an_ref;
730		simple_unlock(&anon->an_lock);
731
732		if (refs == 0) {
733			uvm_anfree(anon);	/* last reference: kill anon */
734		}
735	}
736}
737
738/*
739 * uvm_unloanpage: kill loans on pages loaned out to the kernel
740 *
741 * => we expect all our resources to be unlocked
742 */
743
744static void
745uvm_unloanpage(ploans, npages, wired)
746	struct vm_page **ploans;
747	int npages, wired;
748{
749	struct vm_page *pg;
750
751	uvm_lock_pageq();
752
753	while (npages-- > 0) {
754		pg = *ploans++;
755
756		if (pg->loan_count < 1)
757			panic("uvm_unloanpage: page %p isn't loaned", pg);
758
759		pg->loan_count--;		/* drop loan */
760
761		if (wired)
762			uvm_pageunwire(pg);
763
764		/*
765		 * if page is unowned and we killed last loan, then we can
766		 * free it
767		 */
768		if (pg->loan_count == 0 && pg->uobject == NULL &&
769		    pg->uanon == NULL) {
770
771			if (pg->flags & PG_BUSY)
772	panic("uvm_unloanpage: page %p unowned but PG_BUSY!", pg);
773
774			/* be safe */
775			pmap_page_protect(pg, VM_PROT_NONE);
776			uvm_pagefree(pg);	/* pageq locked above */
777
778		}
779	}
780
781	uvm_unlock_pageq();
782}
783
784/*
785 * Unloan the memory.
786 */
787void
788uvm_unloan(void **result, int npages, int flags)
789{
790	if (flags & UVM_LOAN_TOANON)
791		uvm_unloananon((struct vm_anon **)result, npages,
792		    flags & UVM_LOAN_WIRED);
793	else
794		uvm_unloanpage((struct vm_page **)result,
795		    npages, flags & UVM_LOAN_WIRED);
796}
797