vm_pager.c revision 20054
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pager.c,v 1.24 1996/09/08 20:44:49 dyson Exp $
 */

/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/ucred.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

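/*
 * Pager operations vector, indexed by object type (objtype_t).
 */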
static struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
};
static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;			/* non-zero when a process is waiting for a swap buffer */
static vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init()
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

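/*
 * Set up the swap buffer headers used for pager I/O and reserve the
 * pageable kernel virtual address space their data maps into.
 */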
void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf - 1; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(type, handle, size, prot, off)
	objtype_t type;
	void *handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_ooffset_t off;
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}

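/*
 * Release a pager instance: hand the object to the pgo_dealloc routine
 * of the pager type backing it.
 */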
void
vm_pager_deallocate(object)
	vm_object_t object;
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

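/*
 * Page in the "count" pages in m[] via the object's pager; "reqpage"
 * is the index of the page that must be valid on return.
 */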
int
vm_pager_get_pages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	return ((*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage));
}

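/*
 * Page out the "count" pages in m[] via the object's pager.  The
 * per-page result codes are returned in rtvals[]; "sync" requests a
 * synchronous write.
 */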
int
vm_pager_put_pages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	return ((*pagertab[object->type]->pgo_putpages)(object, m, count, sync, rtvals));
}

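/*
 * Ask the object's pager whether backing store exists for the page at
 * the given index.  The optional "before" and "after" pointers are
 * passed through so the pager can report adjacent backing-store pages.
 */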
boolean_t
vm_pager_has_page(object, offset, before, after)
	vm_object_t object;
	vm_pindex_t offset;
	int *before;
	int *after;
{
	return ((*pagertab[object->type]->pgo_haspage) (object, offset, before, after));
}

/*
 * Called by the pageout daemon before it goes back to sleep.
 * Gives pagers a chance to clean up any completed async paging operations.
 */
void
vm_pager_sync()
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

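/*
 * Map a single page into kernel virtual address space, sleeping in
 * kmem_alloc_wait() until a page of pager_map KVA is available.
 */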
vm_offset_t
vm_pager_map_page(m)
	vm_page_t m;
{
	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	return (kva);
}

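/*
 * Undo vm_pager_map_page(): remove the mapping and return the KVA to
 * pager_map, waking up anyone waiting for pager_map space.
 */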
void
vm_pager_unmap_page(kva)
	vm_offset_t kva;
{
	pmap_kremove(kva);
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
}

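/*
 * Look up a pager-backed object by handle on the given pager object list.
 * Returns NULL if no object with that handle is found.
 */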
vm_object_t
vm_pager_object_lookup(pg_list, handle)
	register struct pagerlst *pg_list;
	void *handle;
{
	register vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL; object = TAILQ_NEXT(object, pager_object_list))
		if (object->handle == handle)
			return (object);
	return (NULL);
}

/*
 * This routine consumes a reference to the object; the caller must
 * therefore hold a reference before calling it.
 */
int
pager_cache(object, should_cache)
	vm_object_t object;
	boolean_t should_cache;
{
	if (object == NULL)
		return (KERN_INVALID_ARGUMENT);

	if (should_cache)
		object->flags |= OBJ_CANPERSIST;
	else
		object->flags &= ~OBJ_CANPERSIST;

	vm_object_deallocate(object);

	return (KERN_SUCCESS);
}

/*
 * initialize a physical buffer
 */

static void
initpbuf(struct buf *bp)
{
	bzero(bp, sizeof *bp);
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = QUEUE_NONE;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_vnbufs.le_next = NOLIST;
}

/*
 * allocate a physical buffer
 */
struct buf *
getpbuf()
{
	int s;
	struct buf *bp;

	s = splbio();
	/* get a bp from the swap buffer header pool */
	while ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		bswneeded = 1;
		tsleep(&bswneeded, PVM, "wswbuf", 0);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);
	return bp;
}

/*
 * allocate a physical buffer, if one is available
 */
struct buf *
trypbuf()
{
	int s;
	struct buf *bp;

	s = splbio();
	if ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	splx(s);

	initpbuf(bp);

	return bp;
}

/*
 * release a physical buffer
 */
void
relpbuf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}
	if (bp->b_vp)
		pbrelvp(bp);

	if (bp->b_flags & B_WANTED)
		wakeup(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	splx(s);
}