vm_pager.c revision 7008
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pager.c,v 1.12 1995/01/10 07:32:51 davidg Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/ucred.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.  That product
 * is 64 * 64k = 4MB; the 8MB map below leaves headroom for the transient
 * mappings made by vm_pager_map_page() as well.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
boolean_t pager_map_wanted;
vm_offset_t pager_sva, pager_eva;
int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers.  Note that the guard must test
	 * *pgops (a table slot could be NULL); pgops itself is always
	 * a valid pointer while walking the table.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_init) ();
	if (dfltpagerops == NULL)
		panic("no default pager");
}

void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf - 1; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_actf = NULL;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_pager_t
vm_pager_allocate(type, handle, size, prot, off)
	int type;
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t off;
{
	struct pagerops *ops;

	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}

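/*
 * Example (illustrative sketch only, not part of this file): a caller
 * such as an mmap-style path might request a vnode pager like this,
 * using the vnode pointer as the handle.  The names `vp' and `size'
 * are assumed to exist in the caller.
 */
#if 0
	vm_pager_t pager;

	pager = vm_pager_allocate(PG_VNODE, (caddr_t) vp, size,
	    VM_PROT_ALL, (vm_offset_t) 0);
	if (pager == NULL)
		return (KERN_INVALID_ARGUMENT);	/* unknown pager type */
#endif
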
void
vm_pager_deallocate(pager)
	vm_pager_t pager;
{
	if (pager == NULL)
		panic("vm_pager_deallocate: null pager");

	(*pager->pg_ops->pgo_dealloc) (pager);
}

int
vm_pager_get_pages(pager, m, count, reqpage, sync)
	vm_pager_t pager;
	vm_page_t *m;
	int count;
	int reqpage;
	boolean_t sync;
{
	int i;

	if (pager == NULL) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				PAGE_WAKEUP(m[i]);
				vm_page_free(m[i]);
			}
		}
		vm_page_zero_fill(m[reqpage]);
		return VM_PAGER_OK;
	}
	if (pager->pg_ops->pgo_getpages == 0) {
		for (i = 0; i < count; i++) {
			if (i != reqpage) {
				PAGE_WAKEUP(m[i]);
				vm_page_free(m[i]);
			}
		}
		return (VM_PAGER_GET(pager, m[reqpage], sync));
	} else {
		return (VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
	}
}

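/*
 * Example (sketch, not from this file): a fault path that has already
 * assembled a page array `marray' with the needed page at index
 * `reqpage' would dispatch through the object's pager like this.
 */
#if 0
	rv = vm_pager_get_pages(object->pager, marray, count, reqpage, TRUE);
	if (rv != VM_PAGER_OK)
		goto fault_error;	/* hypothetical error label */
#endif
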
int
vm_pager_put_pages(pager, m, count, sync, rtvals)
	vm_pager_t pager;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int i;

	if (pager->pg_ops->pgo_putpages)
		return (VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
	else {
		for (i = 0; i < count; i++) {
			rtvals[i] = VM_PAGER_PUT(pager, m[i], sync);
		}
		return rtvals[0];
	}
}

boolean_t
vm_pager_has_page(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	if (pager == NULL)
		panic("vm_pager_has_page: null pager");
	return ((*pager->pg_ops->pgo_haspage) (pager, offset));
}

/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging
 * operations.  As in vm_pager_init(), the guard must test *pgops,
 * not pgops.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (*pgops)
			(*(*pgops)->pgo_putpage) (NULL, NULL, 0);
}

#if 0
void
vm_pager_cluster(pager, offset, loff, hoff)
	vm_pager_t pager;
	vm_offset_t offset;
	vm_offset_t *loff;
	vm_offset_t *hoff;
{
	if (pager == NULL)
		panic("vm_pager_cluster: null pager");
	return ((*pager->pg_ops->pgo_cluster) (pager, offset, loff, hoff));
}
#endif

vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

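/*
 * Example (sketch): a pager that needs a temporary kernel mapping to
 * touch a page's contents pairs the two routines above.  `m' and
 * `buffer' are assumed to exist in the caller.
 */
#if 0
	vm_offset_t kva;

	kva = vm_pager_map_page(m);
	bcopy((caddr_t) kva, buffer, PAGE_SIZE);	/* read page contents */
	vm_pager_unmap_page(kva);
#endif
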
vm_page_t
vm_pager_atop(kva)
	vm_offset_t kva;
{
	vm_offset_t pa;

	pa = pmap_kextract(kva);
	if (pa == 0)
		panic("vm_pager_atop");
	return (PHYS_TO_VM_PAGE(pa));
}

vm_pager_t
vm_pager_lookup(pglist, handle)
	register struct pagerlst *pglist;
	caddr_t handle;
{
	register vm_pager_t pager;

	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
		if (pager->pg_handle == handle)
			return (pager);
	return (NULL);
}

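/*
 * Example (sketch): pager implementations keep their instances on a
 * pagerlst and consult vm_pager_lookup() before creating a new one.
 * The list name `swap_pager_list' is borrowed from the swap pager
 * purely for illustration.
 */
#if 0
	pager = vm_pager_lookup(&swap_pager_list, handle);
	if (pager != NULL)
		return (pager);		/* reuse the existing instance */
#endif
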
/*
 * This routine consumes a reference to the object; the caller must
 * therefore hold a reference before calling it.
 */
int
pager_cache(object, should_cache)
	vm_object_t object;
	boolean_t should_cache;
{
	if (object == NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_cache_lock();
	vm_object_lock(object);
	if (should_cache)
		object->flags |= OBJ_CANPERSIST;
	else
		object->flags &= ~OBJ_CANPERSIST;
	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_deallocate(object);

	return (KERN_SUCCESS);
}

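/*
 * Example (sketch): because pager_cache() consumes an object
 * reference, a caller that wants to keep using the object afterwards
 * must take an extra reference first.
 */
#if 0
	vm_object_reference(object);	/* reference consumed below */
	(void) pager_cache(object, TRUE);
#endif
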
/*
 * Allocate a physical buffer, sleeping until one is available.
 */
struct buf *
getpbuf()
{
	int s;
	struct buf *bp;

	s = splbio();
	/* get a bp from the swap buffer header pool */
	while ((bp = bswlist.tqh_first) == NULL) {
		bswneeded = 1;
		tsleep((caddr_t) &bswneeded, PVM, "wswbuf", 0);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	/*
	 * Each swap buffer header owns a fixed MAXPHYS-sized window of
	 * the swapbkva region, indexed by its position in swbuf[].
	 */
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_vnbufs.le_next = NOLIST;
	return bp;
}

/*
 * allocate a physical buffer, if one is available
 */
struct buf *
trypbuf()
{
	int s;
	struct buf *bp;

	s = splbio();
	if ((bp = bswlist.tqh_first) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_vnbufs.le_next = NOLIST;
	return bp;
}

/*
 * release a physical buffer
 */
void
relpbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	if (bp->b_vp)
		pbrelvp(bp);

	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t) bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		/*
		 * Wake on &bswneeded, the channel getpbuf() sleeps on;
		 * waking &bswlist here would strand sleepers.
		 */
		wakeup((caddr_t) &bswneeded);
	}
	splx(s);
}

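/*
 * Example (sketch): the usual life cycle of a physical buffer.  A
 * pager doing swap I/O brackets the transfer with getpbuf() and
 * relpbuf(); filling in the buffer and starting the I/O are elided.
 */
#if 0
	struct buf *bp;

	bp = getpbuf();		/* may sleep until a header is free */
	/* ... set up bp, issue the I/O, wait for it to complete ... */
	relpbuf(bp);		/* return the header and wake waiters */
#endif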