/*	$NetBSD: uvm_anon.c,v 1.33 2005/05/11 13:02:25 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.33 2005/05/11 13:02:25 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

static POOL_INIT(uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
    &pool_allocator_nointr);
static struct pool_cache uvm_anon_pool_cache;

static int uvm_anon_ctor(void *, void *, int);

/*
 * uvm_anon_init: set up the anon pool cache.
 */
void
uvm_anon_init()
{

	pool_cache_init(&uvm_anon_pool_cache, &uvm_anon_pool,
	    uvm_anon_ctor, NULL, NULL);
}

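/*
 * uvm_anon_ctor: pool cache constructor.  A constructed anon carries no
 * reference, no resident page and no swap slot; these are exactly the
 * invariants that uvm_analloc() asserts before handing an anon out.
 */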
static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	simple_lock_init(&anon->an_lock);
	anon->an_page = NULL;
	anon->an_swslot = 0;

	return 0;
}

/*
 * uvm_analloc: allocate an anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_pool_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
		KASSERT(anon->an_page == NULL);
		KASSERT(anon->an_swslot == 0);
		anon->an_ref = 1;
		simple_lock(&anon->an_lock);
	}
	return anon;
}
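
/*
 * Illustrative sketch (hypothetical caller, not compiled): uvm_analloc()
 * uses PR_NOWAIT, so a NULL return is a normal out-of-memory outcome, and
 * a successful return hands back the anon locked with a single reference.
 */
#if 0
static struct vm_anon *
example_get_anon(void)
{
	struct vm_anon *anon;

	anon = uvm_analloc();
	if (anon == NULL)
		return NULL;	/* pool is exhausted; caller must cope */

	/*
	 * the new anon is locked and has an_ref == 1.  a real caller would
	 * install a page or a swap slot here before dropping the lock.
	 */
	simple_unlock(&anon->an_lock);
	return anon;
}
#endif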

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */

void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	LOCK_ASSERT(!simple_lock_held(&anon->an_lock));

	/*
	 * get page
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		simple_lock(&anon->an_lock);
		pg = uvm_anon_lockloanpg(anon);
		simple_unlock(&anon->an_lock);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			simple_lock(&anon->an_lock);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				simple_unlock(&anon->an_lock);
				return;
			}
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&anon->an_lock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
				    "freed now!", anon, pg, 0, 0);
		}
	}
	if (pg == NULL && anon->an_swslot > 0) {
		/*
		 * this anon's data lives only in swap; it is about to be
		 * freed, so the swap-only page count must be adjusted.
		 */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_cache_put(&uvm_anon_pool_cache, anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
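
/*
 * Illustrative sketch (hypothetical caller, not compiled): uvm_anfree()
 * wants the anon unlocked with a zero reference count, so a caller that
 * may not hold the last reference drops its count under the lock, unlocks,
 * and only then frees.
 */
#if 0
static void
example_drop_ref(struct vm_anon *anon)
{
	boolean_t lastref;

	/* the anon is assumed to have been removed from its amap already */
	simple_lock(&anon->an_lock);
	lastref = (--anon->an_ref == 0);
	simple_unlock(&anon->an_lock);
	if (lastref)
		uvm_anfree(anon);
}
#endif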

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}
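
/*
 * Illustrative sketch (hypothetical caller, not compiled): once an anon's
 * in-core page has been modified, any copy still on swap is stale, so the
 * slot can be given back while the anon is held locked.
 */
#if 0
static void
example_discard_stale_swap(struct vm_anon *anon)
{
	simple_lock(&anon->an_lock);
	uvm_anon_dropswap(anon);	/* no-op if there is no swap slot */
	simple_unlock(&anon->an_lock);
}
#endif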

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			if (pg->uobject) {
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			uvm_unlock_pageq();
		}
		break;
	}
	return(pg);
}
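
/*
 * Illustrative sketch (hypothetical caller, not compiled): the pattern
 * used by uvm_anfree() above.  With the anon locked, a caller that must
 * touch a loaned resident page lets uvm_anon_lockloanpg() identify and
 * lock the page's real owner; if the page belongs to a uvm_object, that
 * object's lock is held on return and must be dropped by the caller.
 */
#if 0
static void
example_touch_loaned_page(struct vm_anon *anon)
{
	struct vm_page *pg;

	simple_lock(&anon->an_lock);
	pg = anon->an_page;
	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);
	if (pg) {
		/* ... operate on pg here ... */
		if (pg->uobject)
			simple_unlock(&pg->uobject->vmobjlock);
	}
	simple_unlock(&anon->an_lock);
}
#endif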

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uvm_anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
		return TRUE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}
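
/*
 * Illustrative sketch (hypothetical caller, not compiled): paging an
 * anon's data back in from swap, e.g. while draining a swap device.
 * The anon is locked on entry and uvm_anon_pagein() unlocks it on every
 * path; TRUE means the page-in was aborted for lack of memory.
 */
#if 0
static boolean_t
example_pagein(struct vm_anon *anon)
{
	simple_lock(&anon->an_lock);
	if (anon->an_swslot == 0) {
		/* nothing is paged out, so there is nothing to fetch */
		simple_unlock(&anon->an_lock);
		return FALSE;
	}
	return uvm_anon_pagein(anon);	/* drops the anon lock */
}
#endif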

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => caller must lock the anon.
 */

void
uvm_anon_release(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg = anon->an_page;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	uvm_lock_pageq();
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	simple_unlock(&anon->an_lock);

	KASSERT(anon->an_page == NULL);

	uvm_anfree(anon);
}