/* vm_pager.c revision 2112 */
1/*
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57 *  School of Computer Science
58 *  Carnegie Mellon University
59 *  Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_pager.c,v 1.6 1994/08/07 14:53:27 davidg Exp $
65 */
66
67/*
68 *	Paging space routine stubs.  Emulates a matchmaker-like interface
69 *	for builtin pagers.
70 */
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/malloc.h>
75#include <sys/buf.h>
76#include <sys/ucred.h>
77
78#include <vm/vm.h>
79#include <vm/vm_page.h>
80#include <vm/vm_kern.h>
81
/* Operation vectors for each compiled-in pager, defined in their modules. */
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;

/*
 * Table of pager operation vectors, indexed by pager type (PG_*).
 * vm_pager_allocate() and the init/sync loops below walk this table.
 */
struct pagerops *pagertab[] = {
	&swappagerops,		/* PG_SWAP */
	&vnodepagerops,		/* PG_VNODE */
	&devicepagerops,	/* PG_DEV */
};
/* Number of entries in pagertab. */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

struct pagerops *dfltpagerops = NULL;	/* default pager */
94
95/*
96 * Kernel address space for mapping pages.
97 * Used by pagers where KVAs are needed for IO.
98 *
99 * XXX needs to be large enough to support the number of pending async
100 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
101 * (MAXPHYS == 64k) if you want to get the most efficiency.
102 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;		/* kernel submap for pager KVA mappings */
boolean_t pager_map_wanted;	/* NOTE(review): not referenced in this file */
vm_offset_t pager_sva, pager_eva;
int bswneeded;			/* set when getpbuf() sleeps waiting for a buf */
vm_offset_t swapbkva;		/* swap buffers kva */
111
112void
113vm_pager_init()
114{
115	struct pagerops **pgops;
116
117	/*
118	 * Initialize known pagers
119	 */
120	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
121		if (pgops)
122			(*(*pgops)->pgo_init)();
123	if (dfltpagerops == NULL)
124		panic("no default pager");
125}
126
/*
 * vm_pager_bufferinit:
 *
 *	Set up the swap buffer headers (swbuf[]) used for pager physical
 *	I/O, put them on the bswlist free pool, and reserve pager_map KVA
 *	for their data areas (one MAXPHYS-sized slice per buffer).
 */
void
vm_pager_bufferinit()
{
	struct buf *bp;
	int i;
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf - 1; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
	}
	/*
	 * NOTE(review): only nswbuf - 1 headers are queued on bswlist
	 * above; this last header is initialized but never inserted, so
	 * one swbuf entry is held out of the pool.  Confirm this is
	 * intentional before changing the loop bound.
	 */
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;
	bp->b_actf = NULL;

	/* Reserve pageable KVA for the swap buffers' data areas. */
	swapbkva = kmem_alloc_pageable( pager_map, nswbuf * MAXPHYS);
	if( !swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}
149
150/*
151 * Allocate an instance of a pager of the given type.
152 * Size, protection and offset parameters are passed in for pagers that
153 * need to perform page-level validation (e.g. the device pager).
154 */
155vm_pager_t
156vm_pager_allocate(type, handle, size, prot, off)
157	int type;
158	caddr_t handle;
159	vm_size_t size;
160	vm_prot_t prot;
161	vm_offset_t off;
162{
163	struct pagerops *ops;
164
165	ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
166	if (ops)
167		return ((*ops->pgo_alloc)(handle, size, prot, off));
168	return (NULL);
169}
170
171void
172vm_pager_deallocate(pager)
173	vm_pager_t	pager;
174{
175	if (pager == NULL)
176		panic("vm_pager_deallocate: null pager");
177
178	(*pager->pg_ops->pgo_dealloc)(pager);
179}
180
181
182int
183vm_pager_get_pages(pager, m, count, reqpage, sync)
184	vm_pager_t	pager;
185	vm_page_t	*m;
186	int		count;
187	int		reqpage;
188	boolean_t	sync;
189{
190	extern int vm_pageout_count;
191	int i;
192
193	if (pager == NULL) {
194		for (i=0;i<count;i++) {
195			if( i != reqpage) {
196				PAGE_WAKEUP(m[i]);
197				vm_page_free(m[i]);
198			}
199		}
200		vm_page_zero_fill(m[reqpage]);
201		return VM_PAGER_OK;
202	}
203
204	if( pager->pg_ops->pgo_getpages == 0) {
205		for(i=0;i<count;i++) {
206			if( i != reqpage) {
207				PAGE_WAKEUP(m[i]);
208				vm_page_free(m[i]);
209			}
210		}
211		return(VM_PAGER_GET(pager, m[reqpage], sync));
212	} else {
213		return(VM_PAGER_GET_MULTI(pager, m, count, reqpage, sync));
214	}
215}
216
217int
218vm_pager_put_pages(pager, m, count, sync, rtvals)
219	vm_pager_t	pager;
220	vm_page_t	*m;
221	int		count;
222	boolean_t	sync;
223	int		*rtvals;
224{
225	int i;
226
227	if( pager->pg_ops->pgo_putpages)
228		return(VM_PAGER_PUT_MULTI(pager, m, count, sync, rtvals));
229	else {
230		for(i=0;i<count;i++) {
231			rtvals[i] = VM_PAGER_PUT( pager, m[i], sync);
232		}
233		return rtvals[0];
234	}
235}
236
237boolean_t
238vm_pager_has_page(pager, offset)
239	vm_pager_t	pager;
240	vm_offset_t	offset;
241{
242	if (pager == NULL)
243		panic("vm_pager_has_page: null pager");
244	return ((*pager->pg_ops->pgo_haspage)(pager, offset));
245}
246
247/*
248 * Called by pageout daemon before going back to sleep.
249 * Gives pagers a chance to clean up any completed async pageing operations.
250 */
251void
252vm_pager_sync()
253{
254	struct pagerops **pgops;
255
256	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
257		if (pgops)
258			(*(*pgops)->pgo_putpage)(NULL, NULL, 0);
259}
260
#if 0
/*
 * vm_pager_cluster:  (compiled out in this revision)
 *
 *	Ask the pager for the cluster of offsets [*loff, *hoff] it would
 *	group around "offset", via its pgo_cluster entry point.
 *
 *	NOTE(review): this disabled code returns an expression from a
 *	void function -- fix that before re-enabling.
 */
void
vm_pager_cluster(pager, offset, loff, hoff)
	vm_pager_t	pager;
	vm_offset_t	offset;
	vm_offset_t	*loff;
	vm_offset_t	*hoff;
{
	if (pager == NULL)
		panic("vm_pager_cluster: null pager");
	return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
}
#endif
274
275vm_offset_t
276vm_pager_map_page(m)
277	vm_page_t	m;
278{
279	vm_offset_t kva;
280
281	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
282	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
283	return(kva);
284}
285
286void
287vm_pager_unmap_page(kva)
288	vm_offset_t	kva;
289{
290	pmap_kremove(kva);
291	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
292}
293
294vm_page_t
295vm_pager_atop(kva)
296	vm_offset_t	kva;
297{
298	vm_offset_t pa;
299
300	pa = pmap_kextract( kva);
301	if (pa == 0)
302		panic("vm_pager_atop");
303	return (PHYS_TO_VM_PAGE(pa));
304}
305
306vm_pager_t
307vm_pager_lookup(pglist, handle)
308	register struct pagerlst *pglist;
309	caddr_t handle;
310{
311	register vm_pager_t pager;
312
313	for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
314		if (pager->pg_handle == handle)
315			return (pager);
316	return (NULL);
317}
318
319/*
320 * This routine gains a reference to the object.
321 * Explicit deallocation is necessary.
322 */
323int
324pager_cache(object, should_cache)
325	vm_object_t	object;
326	boolean_t	should_cache;
327{
328	if (object == NULL)
329		return (KERN_INVALID_ARGUMENT);
330
331	vm_object_cache_lock();
332	vm_object_lock(object);
333	if (should_cache)
334		object->flags |= OBJ_CANPERSIST;
335	else
336		object->flags &= ~OBJ_CANPERSIST;
337	vm_object_unlock(object);
338	vm_object_cache_unlock();
339
340	vm_object_deallocate(object);
341
342	return (KERN_SUCCESS);
343}
344
345/*
346 * allocate a physical buffer
347 */
348struct buf *
349getpbuf() {
350	int s;
351	struct buf *bp;
352
353	s = splbio();
354	/* get a bp from the swap buffer header pool */
355	while ((bp = bswlist.tqh_first) == NULL) {
356		bswneeded = 1;
357		tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
358	}
359	TAILQ_REMOVE(&bswlist, bp, b_freelist);
360	splx(s);
361
362	bzero(bp, sizeof *bp);
363	bp->b_rcred = NOCRED;
364	bp->b_wcred = NOCRED;
365	bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
366	return bp;
367}
368
369/*
370 * allocate a physical buffer, if one is available
371 */
372struct buf *
373trypbuf() {
374	int s;
375	struct buf *bp;
376
377	s = splbio();
378	if ((bp = bswlist.tqh_first) == NULL) {
379		splx(s);
380		return NULL;
381	}
382	TAILQ_REMOVE(&bswlist, bp, b_freelist);
383	splx(s);
384
385	bzero(bp, sizeof *bp);
386	bp->b_rcred = NOCRED;
387	bp->b_wcred = NOCRED;
388	bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
389	return bp;
390}
391
392/*
393 * release a physical buffer
394 */
395void
396relpbuf(bp)
397	struct buf *bp;
398{
399	int s;
400
401	s = splbio();
402
403	if (bp->b_rcred != NOCRED) {
404		crfree(bp->b_rcred);
405		bp->b_rcred = NOCRED;
406	}
407	if (bp->b_wcred != NOCRED) {
408		crfree(bp->b_wcred);
409		bp->b_wcred = NOCRED;
410	}
411
412	if (bp->b_vp)
413		brelvp(bp);
414
415	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
416
417	if (bswneeded) {
418		bswneeded = 0;
419		wakeup((caddr_t)&bswlist);
420	}
421	splx(s);
422}
423
424
425